| code (string, lengths 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, lengths 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase :List[Any] = '''2.13.1'''
import platform
import pyarrow
from packaging import version
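# Fail fast: `datasets` requires Python >= 3.7 and pyarrow >= 8.0.0, so both are verified before the heavy imports below.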
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase :Union[str, Any] = concatenate_datasets
lowerCamelCase :List[Any] = DownloadConfig
lowerCamelCase :Optional[Any] = DownloadManager
lowerCamelCase :Optional[Any] = DownloadMode
lowerCamelCase :int = DownloadConfig
lowerCamelCase :str = DownloadMode
lowerCamelCase :List[str] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 206 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
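# Unit tests for the summarization helpers imported above: block padding/truncation, "@highlight" story parsing, attention-mask construction, and token type ids.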
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[Any] = 10
def _a (self ):
A_ : Dict = [1, 2, 3, 4]
A_ : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A_ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A_ : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def _a (self ):
A_ : List[str] = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
A_, A_ : Dict = process_story(lowercase )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[int] = """"""
A_, A_ : List[str] = process_story(lowercase )
self.assertEqual(lowercase , [] )
self.assertEqual(lowercase , [] )
def _a (self ):
A_ : Optional[Any] = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
A_, A_ : int = process_story(lowercase )
A_ : Optional[Any] = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(lowercase , lowercase )
A_ : Dict = ["""It was the best of times."""]
self.assertEqual(lowercase , lowercase )
def _a (self ):
A_ : Optional[int] = torch.tensor([1, 2, 3, 4] )
A_ : Dict = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase , 0 ).numpy() , expected.numpy() )
def _a (self ):
A_ : str = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A_ : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 23 ).numpy() , expected.numpy() )
def _a (self ):
A_ : Any = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A_ : List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 1 ).numpy() , expected.numpy() )
def _a (self ):
A_ : List[Any] = 101
A_ : List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A_ : List[str] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A_ : Dict = compute_token_type_ids(lowercase , lowercase )
        np.testing.assert_array_equal(lowercase , lowercase )
| 206 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ='''▁'''
__A ={'''vocab_file''': '''sentencepiece.bpe.model'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , lowercase = None , lowercase=None , **lowercase , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , str ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , tokenizer_file=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
lowerCamelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase_ = 1
lowerCamelCase_ = len(self.sp_model )
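        # Language codes are appended after the SentencePiece vocabulary, each id shifted by the fairseq offset.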
lowerCamelCase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase )
}
lowerCamelCase_ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCamelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCamelCase_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.lang_code_to_id[self._src_lang]
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Tuple:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
lowerCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase ) -> int:
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
lowerCamelCase_ = [1] * len(self.prefix_tokens )
lowerCamelCase_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase )) + suffix_ones
return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ = self.sp_model.PieceToId(lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tuple:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
lowerCamelCase_ = "".join(lowercase ).replace(lowercase , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
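        # MBart layout: no prefix tokens; sequences end with </s> followed by the source language code.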
lowerCamelCase_ = self.lang_code_to_id[src_lang]
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.lang_code_to_id[lang]
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
| 359 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
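# Two parallel TransformeraDModel blocks whose outputs are blended with a configurable mix ratio; each block attends to its own slice of the encoder hidden states.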
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowercase = 16 , lowercase = 88 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = 32 , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = "geglu" , lowercase = None , ) -> Any:
super().__init__()
lowerCamelCase_ = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCamelCase_ = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCamelCase_ = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCamelCase_ = [1, 0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase = True , ) -> int:
lowerCamelCase_ = hidden_states
lowerCamelCase_ = []
lowerCamelCase_ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCamelCase_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCamelCase_ = self.transformer_index_for_condition[i]
lowerCamelCase_ = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCamelCase_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCamelCase_ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase )
| 47 | 0 |
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
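# Agent tool wrapping SpeechT5 text-to-speech; when no speaker embeddings are given, a default x-vector is loaded from the CMU Arctic dataset.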
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase__ = '''microsoft/speecht5_tts'''
lowerCamelCase__ = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
lowerCamelCase__ = '''text_reader'''
lowerCamelCase__ = SpeechTaProcessor
lowerCamelCase__ = SpeechTaForTextToSpeech
lowerCamelCase__ = SpeechTaHifiGan
lowerCamelCase__ = ['''text''']
lowerCamelCase__ = ['''audio''']
def __A ( self : List[Any] ) -> List[str]:
if self.post_processor is None:
SCREAMING_SNAKE_CASE_ = "microsoft/speecht5_hifigan"
super().setup()
def __A ( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Any=None ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.pre_processor(text=__A , return_tensors="pt" , truncation=__A )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
SCREAMING_SNAKE_CASE_ = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
SCREAMING_SNAKE_CASE_ = torch.tensor(embeddings_dataset[7_305]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __A ( self : Optional[int] , __magic_name__ : Tuple ) -> List[Any]:
with torch.no_grad():
return self.model.generate_speech(**__A )
def __A ( self : Optional[Any] , __magic_name__ : int ) -> List[Any]:
with torch.no_grad():
return self.post_processor(__A ).cpu().detach()
| 118 |
"""simple docstring"""
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
    frequency =set()
    # Replace all the whitespace in our sentence
    input_str =lowercase.replace(''' ''' , '''''' )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
    flag =[False] * 26
    for char in lowercase:
        if char.islower():
            flag[ord(char) - ord('''a''')] =True
        elif char.isupper():
            flag[ord(char) - ord('''A''')] =True
    return all(flag )
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def _A ( ):
"""simple docstring"""
from timeit import timeit
a ='''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=lowercase ) )
print(timeit('''is_pangram_faster()''' , setup=lowercase ) )
print(timeit('''is_pangram_fastest()''' , setup=lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 81 | 0 |
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
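# Memory-bounded layer application in the OpenFold style: inputs are flattened over their batch dimensions and pushed through the layer in fixed-size chunks.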
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = []
    if isinstance(UpperCamelCase__ , dict ):
for v in tree.values():
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = []
for d in reversed(UpperCamelCase__ ):
idx.append(flat_idx % d )
A__ = flat_idx // d
return tuple(reversed(UpperCamelCase__ ) )
@torch.jit.ignore
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , ):
"""simple docstring"""
def reduce_edge_list(UpperCamelCase__ ) -> None:
A__ = True
for i in range(len(UpperCamelCase__ ) ):
A__ = -1 * (i + 1)
l[reversed_idx] &= tally
A__ = l[reversed_idx]
if start_edges is None:
A__ = [s == 0 for s in start]
reduce_edge_list(UpperCamelCase__ )
if end_edges is None:
A__ = [e == (d - 1) for e, d in zip(UpperCamelCase__ , UpperCamelCase__ )]
reduce_edge_list(UpperCamelCase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(UpperCamelCase__ ) == 0:
return [()]
elif len(UpperCamelCase__ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
A__ = []
A__ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(UpperCamelCase__ , UpperCamelCase__ ):
if s == e:
path_list.append(slice(UpperCamelCase__ , s + 1 ) )
else:
break
A__ = tuple(UpperCamelCase__ )
A__ = len(UpperCamelCase__ )
# start == end, and we're done
if divergence_idx == len(UpperCamelCase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A__ = start[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A__ = end[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
A__ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = t.shape[:no_batch_dims]
A__ = list(_flat_idx_to_idx(UpperCamelCase__ , UpperCamelCase__ ) )
# _get_minimal_slice_set is inclusive
A__ = list(_flat_idx_to_idx(flat_end - 1 , UpperCamelCase__ ) )
# Get an ordered list of slices to perform
A__ = _get_minimal_slice_set(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
A__ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = False , ):
"""simple docstring"""
if not (len(UpperCamelCase__ ) > 0):
raise ValueError('Must provide at least one input' )
A__ = [shape[:no_batch_dims] for shape in _fetch_dims(UpperCamelCase__ )]
A__ = tuple([max(UpperCamelCase__ ) for s in zip(*UpperCamelCase__ )] )
def _prep_inputs(UpperCamelCase__ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
A__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
A__ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
A__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
A__ = tensor_tree_map(_prep_inputs , UpperCamelCase__ )
A__ = None
if _out is not None:
A__ = tensor_tree_map(lambda UpperCamelCase__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
A__ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
A__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(UpperCamelCase__ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
A__ = 0
A__ = prepped_outputs
for _ in range(UpperCamelCase__ ):
# Chunk the input
if not low_mem:
A__ = _select_chunk
else:
A__ = partial(
_chunk_slice , flat_start=UpperCamelCase__ , flat_end=min(UpperCamelCase__ , i + chunk_size ) , no_batch_dims=len(UpperCamelCase__ ) , )
A__ = tensor_tree_map(UpperCamelCase__ , UpperCamelCase__ )
# Run the layer on the chunk
A__ = layer(**UpperCamelCase__ )
# Allocate space for the output
if out is None:
A__ = tensor_tree_map(lambda UpperCamelCase__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , UpperCamelCase__ )
# Put the chunk in its pre-allocated space
        if isinstance(UpperCamelCase__ , dict ):
            def assign(UpperCamelCase__ , UpperCamelCase__ ) -> None:
                for k, v in da.items():
                    if isinstance(v , dict ):
                        assign(v , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
A__ = da[k]
assign(UpperCamelCase__ , UpperCamelCase__ )
        elif isinstance(UpperCamelCase__ , tuple ):
for xa, xa in zip(UpperCamelCase__ , UpperCamelCase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
A__ = xa
elif isinstance(UpperCamelCase__ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
A__ = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
A__ = tensor_tree_map(lambda UpperCamelCase__ : t.view(orig_batch_dims + t.shape[1:] ) , UpperCamelCase__ )
return out
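# Binary-searches the largest power-of-two chunk size that runs without raising, and caches the result for repeated argument shapes.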
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase = 5_12 ,) -> int:
A__ = max_chunk_size
A__ = None
A__ = None
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
A__ = [2**l for l in range(int(math.log(self.max_chunk_size ,2 ) ) + 1 )]
A__ = [c for c in candidates if c > min_chunk_size]
A__ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__UpperCAmelCase ) -> bool:
try:
with torch.no_grad():
fn(*__lowerCamelCase ,chunk_size=__lowerCamelCase )
return True
except RuntimeError:
return False
A__ = 0
A__ = len(__lowerCamelCase ) - 1
while i > min_viable_chunk_size_index:
A__ = test_chunk_size(candidates[i] )
if not viable:
A__ = (min_viable_chunk_size_index + i) // 2
else:
A__ = i
A__ = (i + len(__lowerCamelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
A__ = True
for aa, aa in zip(__lowerCamelCase ,__lowerCamelCase ):
assert type(__lowerCamelCase ) == type(__lowerCamelCase )
if isinstance(__lowerCamelCase ,(list, tuple) ):
consistent &= self._compare_arg_caches(__lowerCamelCase ,__lowerCamelCase )
            elif isinstance(__lowerCamelCase ,dict ):
A__ = [v for _, v in sorted(aa.items() ,key=lambda __UpperCAmelCase : x[0] )]
A__ = [v for _, v in sorted(aa.items() ,key=lambda __UpperCAmelCase : x[0] )]
consistent &= self._compare_arg_caches(__lowerCamelCase ,__lowerCamelCase )
else:
consistent &= aa == aa
return consistent
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> Union[str, Any]:
A__ = True
A__ = tree_map(lambda __UpperCAmelCase : a.shape if isinstance(__lowerCamelCase ,torch.Tensor ) else a ,__lowerCamelCase ,__lowerCamelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(__lowerCamelCase )
A__ = self._compare_arg_caches(self.cached_arg_data ,__lowerCamelCase )
else:
# Otherwise, we can reuse the precomputed value
A__ = False
if not consistent:
A__ = self._determine_favorable_chunk_size(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,)
A__ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 367 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
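# Round-trip tests for InstructBlipProcessor: save/load, tokenizer + Q-Former tokenizer + image processor integration, and batch decoding.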
@require_vision
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> int:
A__ = tempfile.mkdtemp()
A__ = BlipImageProcessor()
A__ = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
A__ = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
A__ = InstructBlipProcessor(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def snake_case__ ( self ,**__UpperCAmelCase ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname ,**__UpperCAmelCase ).tokenizer
def snake_case__ ( self ,**__UpperCAmelCase ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname ,**__UpperCAmelCase ).image_processor
def snake_case__ ( self ,**__UpperCAmelCase ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname ,**__UpperCAmelCase ).qformer_tokenizer
def snake_case__ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self ) -> str:
A__ = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self ) -> Any:
A__ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,)
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
A__ = self.get_image_processor(do_normalize=__UpperCAmelCase ,padding_value=1.0 )
A__ = InstructBlipProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer ,__UpperCAmelCase )
def snake_case__ ( self ) -> str:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__UpperCAmelCase ,return_tensors='np' )
A__ = processor(images=__UpperCAmelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def snake_case__ ( self ) -> Tuple:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase )
A__ = 'lower newer'
A__ = processor(text=__UpperCAmelCase )
A__ = tokenizer(__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase )
A__ = qformer_tokenizer(__UpperCAmelCase ,return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor['qformer_' + key] )
def snake_case__ ( self ) -> str:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=__UpperCAmelCase ,images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) ,['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] ,)
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def snake_case__ ( self ) -> Tuple:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(__UpperCAmelCase )
A__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Any:
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=__UpperCAmelCase ,image_processor=__UpperCAmelCase ,qformer_tokenizer=__UpperCAmelCase )
A__ = 'lower newer'
A__ = self.prepare_image_inputs()
A__ = processor(text=__UpperCAmelCase ,images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) ,['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] ,)
| 154 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowerCAmelCase__ = """
Human: <<task>>
Assistant: """
lowerCAmelCase__ = """huggingface-tools/default-prompts"""
lowerCAmelCase__ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
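# Resolve a prompt template: free text (anything containing whitespace) is returned unchanged; otherwise the value is treated as a dataset repo id and the template file is fetched.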
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str="run" ) -> Dict:
'''simple docstring'''
if prompt_or_repo_id is None:
A__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
A__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(__lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
| 68 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
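# Two words are anagrams exactly when they share the same sorted-letter signature.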
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
return "".join(sorted(__lowerCAmelCase ) )
def __A ( __lowerCAmelCase )-> list[str]:
"""simple docstring"""
return word_by_signature[signature(__lowerCAmelCase )]
_a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
_a = sorted({word.strip().lower() for word in data.splitlines()})
_a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 39 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
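# Lazy import table: configuration/processing/tokenization are always importable; the modeling classes are exposed only when torch is available.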
_UpperCamelCase : Any = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[str] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
def snake_case (A_ :int ):
'''simple docstring'''
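    # e.g. 10 -> "0b1010", -10 -> "-0b1010", 0 -> "0b0"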
    if isinstance(A_ , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(A_ , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
a : List[Any] = False
if num < 0:
a : Optional[int] = True
a : Dict = -num
a : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(A_ ) for e in binary )
return "0b" + "".join(str(A_ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 186 | 0 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def a_ ( ) -> tuple[list[int], int]:
"""simple docstring"""
lowerCamelCase_ =[randint(-1000 , 1000 ) for i in range(10 )]
lowerCamelCase_ =randint(-5000 , 5000 )
return (arr, r)
a_ : Dict = make_dataset()
def a_ ( __snake_case : list[int] , __snake_case : int ) -> tuple[int, ...]:
"""simple docstring"""
for triplet in permutations(__snake_case , 3 ):
if sum(__snake_case ) == target:
return tuple(sorted(__snake_case ) )
return (0, 0, 0)
def a_ ( __snake_case : list[int] , __snake_case : int ) -> tuple[int, int, int]:
"""simple docstring"""
arr.sort()
lowerCamelCase_ =len(__snake_case )
for i in range(n - 1 ):
lowerCamelCase_, lowerCamelCase_ =i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def a_ ( ) -> tuple[float, float]:
"""simple docstring"""
lowerCamelCase_ ='''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
lowerCamelCase_ ='''
triplet_sum1(*dataset)
'''
lowerCamelCase_ ='''
triplet_sum2(*dataset)
'''
lowerCamelCase_ =repeat(setup=__snake_case , stmt=__snake_case , repeat=5 , number=1_0000 )
lowerCamelCase_ =repeat(setup=__snake_case , stmt=__snake_case , repeat=5 , number=1_0000 )
return (min(__snake_case ), min(__snake_case ))
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ : List[str] = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 75 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = """▁"""
lowerCamelCase = {"""vocab_file""": """spiece.model"""}
lowerCamelCase = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
lowerCamelCase = {
"""google/reformer-crime-and-punishment""": 52_4288,
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]="</s>" , _lowerCAmelCase : Any="<unk>" , _lowerCAmelCase : int=[] , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : List[Any] , ):
'''simple docstring'''
__lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__lowercase =vocab_file
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowerCAmelCase)
@property
def __lowerCamelCase ( self : int):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase ={self.convert_ids_to_tokens(_lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Any):
'''simple docstring'''
__lowercase =self.__dict__.copy()
__lowercase =None
return state
def __setstate__( self : Optional[int] , _lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
__lowercase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__lowercase ={}
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : str):
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase)
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : List[Any]):
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
__lowercase =self.sp_model.IdToPiece(_lowerCAmelCase)
return token
def __lowerCamelCase ( self : Any , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =[]
__lowercase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase) + token
__lowercase =[]
else:
current_sub_tokens.append(_lowerCAmelCase)
out_string += self.sp_model.decode(_lowerCAmelCase)
return out_string.strip()
def __lowerCamelCase ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
__lowercase =os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowerCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(_lowerCAmelCase , 'wb') as fi:
__lowercase =self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase)
return (out_vocab_file,)
| 166 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ : Optional[Any] ={"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
A_ : Tuple =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 80 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
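# Fast RoFormer tokenizer: a jieba-based pre-tokenizer is swapped in for Chinese text, and the stock BertPreTokenizer is restored around pickling and saving.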
A_ : List[str] =logging.get_logger(__name__)
A_ : Optional[Any] ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
A_ : Tuple ={
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
A_ : Any ={
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
A_ : List[str] ={
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : List[Any] = RoFormerTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , a__ ) != do_lower_case
or pre_tok_state.get('strip_accents' , a__ ) != strip_accents
):
_lowerCamelCase = getattr(a__ , pre_tok_state.pop('type' ) )
_lowerCamelCase = do_lower_case
_lowerCamelCase = strip_accents
_lowerCamelCase = pre_tok_class(**a__ )
_lowerCamelCase = do_lower_case
def __getstate__( self ):
_lowerCamelCase = self.__dict__.copy()
_lowerCamelCase = BertPreTokenizer()
return state
def __setstate__( self , a__ ):
_lowerCamelCase = d
_lowerCamelCase = self.__dict__['_tokenizer'].get_vocab()
_lowerCamelCase = PreTokenizer.custom(JiebaPreTokenizer(a__ ) )
def snake_case_ ( self , a__ , a__=None ):
_lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self , a__ , a__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self , a__ , a__ = None ):
_lowerCamelCase = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
def snake_case_ ( self , a__ , a__=None , a__=None , a__=False , **a__ , ):
_lowerCamelCase = BertPreTokenizer()
return super().save_pretrained(a__ , a__ , a__ , a__ , **a__ )
| 80 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
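# Moebius function mu(n): (-1)^k for square-free n with k prime factors, 0 when n has a squared prime factor.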
def UpperCamelCase( __UpperCamelCase : int ):
    lowerCAmelCase_ : Tuple = prime_factors(__UpperCamelCase )
    if is_square_free(lowerCAmelCase_ ):
        return -1 if len(lowerCAmelCase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : int = logging.get_logger(__name__)
A__ : Optional[int] = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class __snake_case ( UpperCamelCase_ ):
_a = '''data2vec-vision'''
def __init__( self : Tuple , A_ : List[Any]=7_6_8 , A_ : Union[str, Any]=1_2 , A_ : Dict=1_2 , A_ : List[Any]=3_0_7_2 , A_ : Dict="gelu" , A_ : Tuple=0.0 , A_ : Dict=0.0 , A_ : List[str]=0.02 , A_ : List[str]=1e-12 , A_ : Tuple=2_2_4 , A_ : Dict=1_6 , A_ : Optional[int]=3 , A_ : Optional[int]=False , A_ : Any=False , A_ : Tuple=False , A_ : Optional[int]=False , A_ : int=0.1 , A_ : Union[str, Any]=0.1 , A_ : List[Any]=True , A_ : List[Any]=[3, 5, 7, 1_1] , A_ : Union[str, Any]=[1, 2, 3, 6] , A_ : Optional[int]=True , A_ : Any=0.4 , A_ : str=2_5_6 , A_ : Optional[int]=1 , A_ : str=False , A_ : Optional[int]=2_5_5 , **A_ : Optional[int] , ):
super().__init__(**A_)
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : List[str] = num_hidden_layers
lowerCAmelCase_ : Optional[Any] = num_attention_heads
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : List[Any] = image_size
lowerCAmelCase_ : List[Any] = patch_size
lowerCAmelCase_ : Any = num_channels
lowerCAmelCase_ : Any = use_mask_token
lowerCAmelCase_ : Optional[int] = use_absolute_position_embeddings
lowerCAmelCase_ : str = use_relative_position_bias
lowerCAmelCase_ : Optional[Any] = use_shared_relative_position_bias
lowerCAmelCase_ : Dict = layer_scale_init_value
lowerCAmelCase_ : Tuple = drop_path_rate
lowerCAmelCase_ : Optional[int] = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase_ : Any = out_indices
lowerCAmelCase_ : int = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase_ : Dict = use_auxiliary_head
lowerCAmelCase_ : str = auxiliary_loss_weight
lowerCAmelCase_ : Optional[Any] = auxiliary_channels
lowerCAmelCase_ : str = auxiliary_num_convs
lowerCAmelCase_ : str = auxiliary_concat_input
lowerCAmelCase_ : str = semantic_loss_ignore_index
class __snake_case ( UpperCamelCase_ ):
_a = version.parse('''1.11''' )
@property
def UpperCAmelCase__ ( self : Tuple):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def UpperCAmelCase__ ( self : Dict):
return 1e-4
| 103 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
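# Tokenizer tests for MGP-STR: character-level vocab, special-token handling, and encode/decode round trips.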
@require_tokenizers
class a ( a__ , unittest.TestCase ):
snake_case__ = MgpstrTokenizer
snake_case__ = False
snake_case__ = {}
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowerCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
lowerCAmelCase = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = 'tester'
lowerCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
lowerCAmelCase = tokenizer.encode([special_token] , add_special_tokens=_snake_case )
self.assertEqual(len(_snake_case ) , 1 )
lowerCAmelCase = tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
self.assertTrue(special_token not in decoded )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCAmelCase ,lowerCAmelCase = self.get_input_output_texts(_snake_case )
lowerCAmelCase = tokenizer.tokenize(_snake_case )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(_snake_case )
lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertNotEqual(len(_snake_case ) , 0 )
lowerCAmelCase = tokenizer.decode(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual(text_a.replace(' ' , '' ) , _snake_case )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
| 309 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
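# Histogram equalization of a grayscale image: the normalized histogram is accumulated into a CDF-based lookup table, then every pixel is remapped through it.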
class a :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase = ''
lowerCAmelCase = ''
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 2_56
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = cva.imread(_snake_case , 0 )
lowerCAmelCase = copy.deepcopy(self.img )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
lowerCAmelCase = np.sum(_snake_case )
for i in range(len(_snake_case ) ):
lowerCAmelCase = x[i] / self.k
self.sk += prk
lowerCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
lowerCAmelCase = int(last % last )
lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_snake_case )
lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
lowerCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
lowerCAmelCase = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def UpperCamelCase__ ( self ):
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
__UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
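# For reference, the cumulative-histogram mapping implemented by `stretch` can
# be written in a few lines of NumPy. This is a sketch assuming a uint8
# grayscale array; the names (`equalize_sketch`, `cdf`, `lut`) are illustrative,
# not part of the class above.
def equalize_sketch(img):
    hist = np.bincount(img.ravel(), minlength=256)  # per-level pixel counts
    cdf = hist.cumsum() / hist.sum()  # cumulative distribution in [0, 1]
    lut = np.round(255 * cdf).astype(np.uint8)  # old level -> stretched level
    return lut[img]  # apply the lookup table pixel-wise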
| 309 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
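# Minimal usage sketch: the defaults above describe the three-stage CvT-13
# layout, so a bare instantiation reproduces that architecture.
#
#     config = CvtConfig()
#     assert config.depth == [1, 2, 10]  # one entry per stage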
| 22 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
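# Sketch of what the two helpers above produce for a sequence pair (ids written
# symbolically, with `A`/`B` standing for the two token-id lists):
#
#     build_inputs_with_special_tokens(A, B)     -> [CLS] A [SEP] B [SEP]
#     create_token_type_ids_from_sequences(A, B) -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"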
| 22 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
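# Sketch of the `table_text` format consumed by the preprocessing above: rows
# are separated by "\n" and cells by "#", with the first row as the header.
# Toy data, not from the TabFact dataset:
#
#     table_text = "city#population\nparis#2.1m\nberlin#3.6m"
#     # _convert_table_text_to_pandas(table_text) yields a DataFrame with
#     # columns ["city", "population"] and two data rows, which TapexTokenizer
#     # flattens together with the statement into one input sequence.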
| 268 |
"""simple docstring"""
import math
def sieve(n):
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # Classic sieve over the first segment [2, sqrt(n)]; these primes seed the rest.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve each subsequent segment [low, high] using the seed primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside the segment.
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
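# Quick sanity check (sketch): the segmented result matches a naive sieve on
# small inputs, e.g. sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].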
| 268 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Kadane's rule: either extend the running subarray or restart it at `num`.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
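    # The maximum subarray of the sample above is [4, -1, 2, 1], so this prints 6.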
| 20 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
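# Input convention exercised above (values illustrative): Flaubert models accept
# `lengths` (true sequence lengths) and `langs` (per-token language ids) next to
# `input_ids`, e.g.
#
#     inputs = {"input_ids": ids, "lengths": tf.constant([8]), "langs": tf.zeros_like(ids)}
#     outputs = model(inputs)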
| 20 | 1 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
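        # Note: `accelerator.prepare` wraps the optimizer in Accelerate's
        # optimizer wrapper; the round-trip above checks that the wrapped
        # object stays picklable, which matters when checkpointing a
        # prepared training state.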
| 352 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
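# Usage sketch (environment variable names are hypothetical):
#
#     world_size = get_int_from_env(["WORLD_SIZE", "LOCAL_WORLD_SIZE"], 1)
#     debug = parse_flag_from_env("MY_DEBUG_FLAG")        # -> bool
#     mode = parse_choice_from_env("MY_PRECISION_MODE")   # -> str, default "no"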
| 334 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 81 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 247 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __lowercase ( snake_case_ : str ) ->YolosConfig:
'''simple docstring'''
__A : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__A : str = 192
__A : Any = 768
__A : Dict = 12
__A : Tuple = 3
__A : Union[str, Any] = [800, 1333]
__A : Tuple = False
elif yolos_name == "yolos_s_dWr":
__A : Dict = 330
__A : Dict = 14
__A : List[str] = 6
__A : List[str] = 1320
elif "yolos_s" in yolos_name:
__A : str = 384
__A : Tuple = 1536
__A : Dict = 12
__A : Tuple = 6
elif "yolos_b" in yolos_name:
__A : Union[str, Any] = [800, 1344]
__A : Tuple = 91
__A : Tuple = '''huggingface/label-files'''
__A : Union[str, Any] = '''coco-detection-id2label.json'''
__A : Tuple = json.load(open(hf_hub_download(snake_case_ ,snake_case_ ,repo_type='''dataset''' ) ,'''r''' ) )
__A : Tuple = {int(snake_case_ ): v for k, v in idalabel.items()}
__A : Dict = idalabel
__A : str = {v: k for k, v in idalabel.items()}
return config
def __lowercase ( snake_case_ : dict ,snake_case_ : YolosConfig ,snake_case_ : bool = False ) ->None:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__A : List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__A : str = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__A : List[Any] = in_proj_weight[: config.hidden_size, :]
__A : str = in_proj_bias[: config.hidden_size]
__A : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__A : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__A : str = in_proj_weight[-config.hidden_size :, :]
__A : Union[str, Any] = in_proj_bias[-config.hidden_size :]
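# Layout note: the fused timm qkv projection has shape (3 * hidden_size, hidden_size).
# Rows [0, hidden_size) hold the query weights, rows [hidden_size, 2 * hidden_size) the keys,
# and the final hidden_size rows the values -- exactly the three slices taken above.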
def __lowercase ( snake_case_ : str ) ->str:
'''simple docstring'''
if "backbone" in name:
__A : List[Any] = name.replace('''backbone''' ,'''vit''' )
if "cls_token" in name:
__A : Any = name.replace('''cls_token''' ,'''embeddings.cls_token''' )
if "det_token" in name:
__A : Any = name.replace('''det_token''' ,'''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
__A : int = name.replace('''mid_pos_embed''' ,'''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
__A : List[Any] = name.replace('''pos_embed''' ,'''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
__A : Dict = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
__A : Optional[int] = name.replace('''blocks''' ,'''encoder.layer''' )
if "attn.proj" in name:
__A : List[str] = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
__A : Optional[int] = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
__A : Tuple = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
__A : Optional[Any] = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
__A : List[Any] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
__A : str = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "class_embed" in name:
__A : int = name.replace('''class_embed''' ,'''class_labels_classifier''' )
if "bbox_embed" in name:
__A : Union[str, Any] = name.replace('''bbox_embed''' ,'''bbox_predictor''' )
if "vit.norm" in name:
__A : Any = name.replace('''vit.norm''' ,'''vit.layernorm''' )
return name
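# A worked example of the renaming chain above, assuming a typical timm-style key:
# "backbone.blocks.0.attn.proj.weight"
# -> "vit.blocks.0.attn.proj.weight" (backbone -> vit)
# -> "vit.encoder.layer.0.attn.proj.weight" (blocks -> encoder.layer)
# -> "vit.encoder.layer.0.attention.output.dense.weight" (attn.proj -> attention.output.dense)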
def __lowercase ( snake_case_ : dict ,snake_case_ : YolosForObjectDetection ) ->dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__A : List[Any] = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
__A : Tuple = key.split('''.''' )
__A : Union[str, Any] = int(key_split[2] )
__A : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__A : Union[str, Any] = val[:dim, :]
__A : Tuple = val[
dim : dim * 2, :
]
__A : Any = val[-dim:, :]
else:
__A : List[str] = val[:dim]
__A : int = val[dim : dim * 2]
__A : Optional[int] = val[-dim:]
else:
__A : Tuple = val
return orig_state_dict
def __lowercase ( ) ->Image.Image:
'''simple docstring'''
__A : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__A : Dict = Image.open(requests.get(snake_case_ ,stream=snake_case_ ).raw )
return im
@torch.no_grad()
def __lowercase ( snake_case_ : str ,snake_case_ : str ,snake_case_ : str ,snake_case_ : bool = False ) ->Optional[Any]:
'''simple docstring'''
__A : Optional[Any] = get_yolos_config(snake_case_ )
# load original state_dict
__A : Union[str, Any] = torch.load(snake_case_ ,map_location='''cpu''' )['''model''']
# load 🤗 model
__A : List[str] = YolosForObjectDetection(snake_case_ )
model.eval()
__A : List[Any] = convert_state_dict(snake_case_ ,snake_case_ )
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by YolosImageProcessor
__A : Tuple = 800 if yolos_name != '''yolos_ti''' else 512
__A : Tuple = YolosImageProcessor(format='''coco_detection''' ,size=snake_case_ )
__A : Tuple = image_processor(images=prepare_img() ,return_tensors='''pt''' )
__A : Any = model(**snake_case_ )
__A , __A : int = outputs.logits, outputs.pred_boxes
__A , __A : Optional[Any] = None, None
if yolos_name == "yolos_ti":
__A : str = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__A : int = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__A : List[Any] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__A : Any = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__A : Union[str, Any] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__A : Optional[int] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__A : str = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__A : Optional[int] = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__A : int = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__A : Union[str, Any] = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] ,snake_case_ ,atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] ,snake_case_ ,atol=1e-4 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case_ )
if push_to_hub:
__A : Any = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
__A : List[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case_ ,organization='''hustvl''' )
model.push_to_hub(snake_case_ ,organization='''hustvl''' )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 291 |
"""simple docstring"""
from math import factorial
def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->int:
'''simple docstring'''
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
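# This computes the binomial coefficient C(n, k) = n! / (k! * (n - k)!).
# For instance, combinations(5, 2) == 10 and combinations(52, 5) == 2_598_960.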
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
| 291 | 1 |
def __lowercase ( _UpperCamelCase = 100 ) ->int:
"""simple docstring"""
lowercase : List[Any] = (n * (n + 1) // 2) ** 2
lowercase : Any = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
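# The two closed forms used above are sum(1..n) = n(n + 1) / 2 and
# sum(i^2, 1..n) = n(n + 1)(2n + 1) / 6; for the default n = 100 the
# difference is 5050**2 - 338350 = 25164150.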
if __name__ == "__main__":
print(F'''{solution() = }''')
| 337 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__a = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None:
"""simple docstring"""
lowercase : List[Any] = f"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ):
lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None
else:
lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
f""" got {requirement}""" )
lowercase , lowercase : str = match[0]
lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements
lowercase : List[Any] = {}
for w in want_range:
lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
f""" but got {requirement}""" )
lowercase , lowercase : Optional[int] = match[0]
lowercase : Dict = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
return
# check if any version is installed
try:
lowercase : List[str] = importlib.metadata.version(_UpperCamelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
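# A usage sketch (hypothetical call, argument names per the original transformers API):
# require_version("numpy>=1.17,<2.0", hint="Try: pip install numpy -U")
# parses both constraints from the comma-separated range and raises ImportError if the
# installed numpy violates either bound.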
def __lowercase ( _UpperCamelCase ) ->int:
"""simple docstring"""
lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(_UpperCamelCase, _UpperCamelCase )
| 337 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A_ ( unittest.TestCase ):
def lowercase ( self : int ):
_UpperCAmelCase = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 1_2_8, "min_length": 1_2, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 1_4_2, "min_length": 5_6, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 6_2, "min_length": 1_1, "num_beams": 6},
}
}
_UpperCAmelCase = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 1_2_8,
"task_specific_params.summarization.min_length": 1_2,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 1_4_2,
"task_specific_params.summarization_cnn.min_length": 5_6,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 6_2,
"task_specific_params.summarization_xsum.min_length": 1_1,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(snake_case_ ) , snake_case_ )
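# A smaller sanity check of the same helper (a sketch, not part of the original test data):
# with the default "." delimiter,
# flatten_dict({"a": {"b": 1, "c": {"d": 2}}}) == {"a.b": 1, "a.c.d": 2}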
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case_ ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowercase ( self : List[str] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ ) , transpose(snake_case_ ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , transpose(snake_case_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowercase ( self : str ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ ) , transpose(snake_case_ ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , transpose(snake_case_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ ) , np.asarray(transpose(snake_case_ ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case_ )
self.assertTrue(np.allclose(transpose(snake_case_ , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case_ , axes=(1, 2, 0) ) ) ) )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , np.reshape(snake_case_ , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case_ , (1_2, 5) ) , np.reshape(snake_case_ , (1_2, 5) ) ) )
@require_torch
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , reshape(snake_case_ , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (1_2, 5) ) , reshape(snake_case_ , (1_2, 5) ).numpy() ) )
@require_tf
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , reshape(snake_case_ , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (1_2, 5) ) , reshape(snake_case_ , (1_2, 5) ).numpy() ) )
@require_flax
def lowercase ( self : int ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (4, 3) ) , np.asarray(reshape(snake_case_ , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case_ )
self.assertTrue(np.allclose(reshape(snake_case_ , (1_2, 5) ) , np.asarray(reshape(snake_case_ , (1_2, 5) ) ) ) )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , np.squeeze(snake_case_ ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , np.squeeze(snake_case_ , axis=2 ) ) )
@require_torch
def lowercase ( self : Dict ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , squeeze(snake_case_ ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , squeeze(snake_case_ , axis=2 ).numpy() ) )
@require_tf
def lowercase ( self : List[str] ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , squeeze(snake_case_ ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , squeeze(snake_case_ , axis=2 ).numpy() ) )
@require_flax
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ ) , np.asarray(squeeze(snake_case_ ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case_ )
self.assertTrue(np.allclose(squeeze(snake_case_ , axis=2 ) , np.asarray(squeeze(snake_case_ , axis=2 ) ) ) )
def lowercase ( self : int ):
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , np.expand_dims(snake_case_ , axis=1 ) ) )
@require_torch
def lowercase ( self : List[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case_ )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , expand_dims(snake_case_ , axis=1 ).numpy() ) )
@require_tf
def lowercase ( self : str ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case_ )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , expand_dims(snake_case_ , axis=1 ).numpy() ) )
@require_flax
def lowercase ( self : List[Any] ):
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case_ )
self.assertTrue(np.allclose(expand_dims(snake_case_ , axis=1 ) , np.asarray(expand_dims(snake_case_ , axis=1 ) ) ) )
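# Taken together, these tests confirm that the framework-agnostic helpers (transpose,
# reshape, squeeze, expand_dims) agree with their NumPy counterparts whether the input
# is a NumPy array, a torch tensor, a TF constant, or a JAX array.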
| 156 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE :List[str] = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Union[PIL.Image.Image, np.ndarray]
class A_ ( lowerCAmelCase_ ):
def __init__( self : Any , snake_case_ : PriorTransformer , snake_case_ : CLIPVisionModel , snake_case_ : CLIPImageProcessor , snake_case_ : HeunDiscreteScheduler , snake_case_ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=snake_case_ , image_encoder=snake_case_ , image_processor=snake_case_ , scheduler=snake_case_ , renderer=snake_case_ , )
def lowercase ( self : List[Any] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] ):
if latents is None:
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_UpperCAmelCase = latents.to(snake_case_ )
_UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def lowercase ( self : Optional[Any] , snake_case_ : Union[str, Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case_ , snake_case_ )
@property
def lowercase ( self : List[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(snake_case_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowercase ( self : Optional[Any] , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : int , snake_case_ : List[str] , ):
if isinstance(snake_case_ , snake_case_ ) and isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(snake_case_ , axis=0 ) if image[0].ndim == 4 else torch.stack(snake_case_ , axis=0 )
if not isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = self.image_processor(snake_case_ , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_UpperCAmelCase = image.to(dtype=self.image_encoder.dtype , device=snake_case_ )
_UpperCAmelCase = self.image_encoder(snake_case_ )["last_hidden_state"]
_UpperCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_UpperCAmelCase = image_embeds.repeat_interleave(snake_case_ , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase = torch.zeros_like(snake_case_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(snake_case_ )
def __call__( self : str , snake_case_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , snake_case_ : int = 1 , snake_case_ : int = 2_5 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[torch.FloatTensor] = None , snake_case_ : float = 4.0 , snake_case_ : int = 6_4 , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
elif isinstance(snake_case_ , snake_case_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_UpperCAmelCase = len(snake_case_ )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(snake_case_ )}' )
_UpperCAmelCase = self._execution_device
_UpperCAmelCase = batch_size * num_images_per_prompt
_UpperCAmelCase = guidance_scale > 1.0
_UpperCAmelCase = self._encode_image(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# prior
self.scheduler.set_timesteps(snake_case_ , device=snake_case_ )
_UpperCAmelCase = self.scheduler.timesteps
_UpperCAmelCase = self.prior.config.num_embeddings
_UpperCAmelCase = self.prior.config.embedding_dim
_UpperCAmelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , snake_case_ , snake_case_ , snake_case_ , self.scheduler , )
# YiYi's note: for testing only, to match ldm we can directly create latents with the desired shape: (batch_size, num_embeddings, embedding_dim)
_UpperCAmelCase = latents.reshape(latents.shape[0] , snake_case_ , snake_case_ )
for i, t in enumerate(self.progress_bar(snake_case_ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
_UpperCAmelCase = self.prior(
snake_case_ , timestep=snake_case_ , proj_embedding=snake_case_ , ).predicted_image_embedding
# remove the variance
_UpperCAmelCase , _UpperCAmelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase = noise_pred.chunk(2 )
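# classifier-free guidance combines the two halves of the batch:
# noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)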
_UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_UpperCAmelCase = self.scheduler.step(
snake_case_ , timestep=snake_case_ , sample=snake_case_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=snake_case_ )
_UpperCAmelCase = []
for i, latent in enumerate(snake_case_ ):
_UpperCAmelCase = self.renderer.decode(
latent[None, :] , snake_case_ , size=snake_case_ , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(snake_case_ )
_UpperCAmelCase = torch.stack(snake_case_ )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_UpperCAmelCase = images.cpu().numpy()
if output_type == "pil":
_UpperCAmelCase = [self.numpy_to_pil(snake_case_ ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=snake_case_ )
| 156 | 1 |
'''simple docstring'''
def __snake_case ( ):
lowerCamelCase_ = []
lowerCamelCase_ = 1
while len(UpperCAmelCase_ ) < 1E6:
constant.append(str(UpperCAmelCase_ ) )
i += 1
lowerCamelCase_ = "".join(UpperCAmelCase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
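# This builds Champernowne's constant (0.123456789101112...); the product of the digits at
# positions 1, 10, 100, ..., 1_000_000 (Project Euler problem 40) evaluates to 210.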
if __name__ == "__main__":
print(solution())
| 55 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE_ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
inspect_metric(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = path + """.py"""
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_config_names(_lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
__lowerCAmelCase = expected_configs[0]
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_dataset_infos(_lowerCAmelCase )
assert expected_config in infos
__lowerCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
with pytest.raises(_lowerCAmelCase ):
get_dataset_split_names(_lowerCAmelCase , config_name=_lowerCAmelCase )
| 301 | 0 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowercase_ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=1_3 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=9_9 , __UpperCamelCase=6_4 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=3_7 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=1_6 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ):
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
UpperCamelCase_ = vocab_size - 1
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self ):
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = True
return config, input_ids, input_mask, token_labels
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = GPTNeoXModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = GPTNeoXForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = GPTNeoXForQuestionAnswering(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = GPTNeoXForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = GPTNeoXForTokenClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and extend the attention mask
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase )
UpperCamelCase_ = output_from_no_past["""hidden_states"""][0]
UpperCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Tuple = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
A__ : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
A__ : Tuple = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : int = False
A__ : List[str] = False
A__ : List[Any] = False
A__ : Any = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = GPTNeoXModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=6_4 , num_attention_heads=8 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_ = None
self.model_tester.create_and_check_model_as_decoder(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = ids_tensor([1, 1_0] , config.vocab_size )
UpperCamelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase_ = GPTNeoXModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
UpperCamelCase_ = original_model(__UpperCamelCase ).last_hidden_state
UpperCamelCase_ = original_model(__UpperCamelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase_ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase_ = GPTNeoXModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
UpperCamelCase_ = scaled_model(__UpperCamelCase ).last_hidden_state
UpperCamelCase_ = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
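# Linear scaling divides every position id by the scaling factor, so even short inputs
# produce different hidden states; dynamic NTK scaling leaves sequences at or below the
# original max_position_embeddings untouched, which is the asymmetry asserted above.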
@require_torch
class lowercase_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCamelCase_ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__UpperCamelCase )
UpperCamelCase_ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCamelCase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCamelCase_ = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCamelCase_ = model.generate(**__UpperCamelCase , do_sample=__UpperCamelCase , max_new_tokens=2_0 )
UpperCamelCase_ = tokenizer.batch_decode(__UpperCamelCase )[0]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 371 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( a__ : Dict , a__ : Dict=None ) -> Union[str, Any]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
UpperCamelCase_ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
UpperCamelCase_ = requests.get(a__ , headers=a__ ).json()
UpperCamelCase_ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
UpperCamelCase_ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(a__ ):
UpperCamelCase_ = requests.get(url + f'''&page={i + 2}''' , headers=a__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
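# GitHub's REST API caps `per_page` at 100, so after the first request the code above
# pages through the remaining ceil((total_count - 100) / 100) result pages.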
def lowerCamelCase__ ( a__ : Union[str, Any] , a__ : Any=None ) -> Optional[int]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
    UpperCamelCase_ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
UpperCamelCase_ = requests.get(a__ , headers=a__ ).json()
UpperCamelCase_ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
UpperCamelCase_ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(a__ ):
UpperCamelCase_ = requests.get(url + f'''&page={i + 2}''' , headers=a__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase__ ( a__ : Dict , a__ : Tuple , a__ : Union[str, Any] , a__ : List[Any] ) -> List[Any]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
UpperCamelCase_ = requests.get(a__ , headers=a__ , allow_redirects=a__ )
UpperCamelCase_ = result.headers["""Location"""]
UpperCamelCase_ = requests.get(a__ , allow_redirects=a__ )
UpperCamelCase_ = os.path.join(a__ , f'''{artifact_name}.zip''' )
with open(a__ , """wb""" ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( a__ : Dict , a__ : Tuple=None ) -> Optional[int]:
UpperCamelCase_ = []
UpperCamelCase_ = []
UpperCamelCase_ = None
with zipfile.ZipFile(a__ ) as z:
for filename in z.namelist():
if not os.path.isdir(a__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(a__ ) as f:
for line in f:
UpperCamelCase_ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCamelCase_ = line[: line.index(""": """ )]
UpperCamelCase_ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
UpperCamelCase_ = line[len("""FAILED """ ) :]
failed_tests.append(a__ )
elif filename == "job_name.txt":
UpperCamelCase_ = line
if len(a__ ) != len(a__ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(a__ )} for `errors` '''
f'''and {len(a__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
UpperCamelCase_ = None
if job_name and job_links:
UpperCamelCase_ = job_links.get(a__ , a__ )
# A list with elements of the form (line of error, error, failed test)
UpperCamelCase_ = [x + [y] + [job_link] for x, y in zip(a__ , a__ )]
return result
def lowerCamelCase__ ( a__ : Any , a__ : Union[str, Any]=None ) -> Dict:
UpperCamelCase_ = []
UpperCamelCase_ = [os.path.join(a__ , a__ ) for p in os.listdir(a__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(a__ , job_links=a__ ) )
return errors
def lowerCamelCase__ ( a__ : Union[str, Any] , a__ : Tuple=None ) -> List[Any]:
UpperCamelCase_ = Counter()
counter.update([x[1] for x in logs] )
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCamelCase_ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
UpperCamelCase_ = dict(sorted(r.items() , key=lambda a__ : item[1]["count"] , reverse=a__ ) )
return r
def lowerCamelCase__ ( a__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase_ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
UpperCamelCase_ = test.split("""/""" )[2]
else:
UpperCamelCase_ = None
return test
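# Example (hypothetical test id): a test such as
# "tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward"
# maps to the model name "bert"; tests outside tests/models/ map to None.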
def lowerCamelCase__ ( a__ : List[str] , a__ : Optional[int]=None ) -> Dict:
UpperCamelCase_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCamelCase_ = [x for x in logs if x[2] is not None]
UpperCamelCase_ = {x[2] for x in logs}
UpperCamelCase_ = {}
for test in tests:
UpperCamelCase_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCamelCase_ = sum(error_counts.values() )
if n_errors > 0:
UpperCamelCase_ = {"""count""": n_errors, """errors""": error_counts}
UpperCamelCase_ = dict(sorted(r.items() , key=lambda a__ : item[1]["count"] , reverse=a__ ) )
return r
def lowerCamelCase__ ( a__ : Any ) -> List[Any]:
UpperCamelCase_ = """| no. | error | status |"""
UpperCamelCase_ = """|-:|:-|:-|"""
UpperCamelCase_ = [header, sep]
for error in reduced_by_error:
UpperCamelCase_ = reduced_by_error[error]["""count"""]
UpperCamelCase_ = f'''| {count} | {error[:100]} | |'''
lines.append(a__ )
return "\n".join(a__ )
def lowerCamelCase__ ( a__ : Optional[int] ) -> str:
UpperCamelCase_ = """| model | no. of errors | major error | count |"""
UpperCamelCase_ = """|-:|-:|-:|-:|"""
UpperCamelCase_ = [header, sep]
for model in reduced_by_model:
UpperCamelCase_ = reduced_by_model[model]["""count"""]
UpperCamelCase_ , UpperCamelCase_ = list(reduced_by_model[model]["""errors"""].items() )[0]
UpperCamelCase_ = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(a__ )
return "\n".join(a__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
_A = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_A = get_job_links(args.workflow_run_id, token=args.token)
_A = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_A = k.find(''' / ''')
_A = k[index + len(''' / ''') :]
_A = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_A = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_A = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_A = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_A = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_A = reduce_by_error(errors)
_A = reduce_by_model(errors)
_A = make_github_table(reduced_by_error)
_A = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 261 | 0 |
def a__ ( A_, A_ ):
'''simple docstring'''
def get_matched_characters(A_, A_ ) -> str:
__magic_name__ = []
__magic_name__ = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__magic_name__ = int(max(0, i - limit ) )
__magic_name__ = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(A_ )
__magic_name__ = f'''{_stra[0:_stra.index(A_ )]} {_stra[_stra.index(A_ ) + 1:]}'''
return "".join(A_ )
# matching characters
__magic_name__ = get_matched_characters(A_, A_ )
__magic_name__ = get_matched_characters(A_, A_ )
__magic_name__ = len(A_ )
# transposition
__magic_name__ = (
len([(ca, ca) for ca, ca in zip(A_, A_ ) if ca != ca] ) // 2
)
if not match_count:
__magic_name__ = 0.0
else:
__magic_name__ = (
1
/ 3
* (
match_count / len(A_ )
+ match_count / len(A_ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__magic_name__ = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
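# A worked example: jaro_winkler("hello", "world") matches only the single "l"
# within the search window, giving a Jaro score of (1/5 + 1/5 + 1/1) / 3 and,
# with no common prefix, a final score of 0.4666666666666666.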
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 88 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
    and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
    Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
    Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
    Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
    and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, references: List[List[List[str]]], predictions: List[List[str]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
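# A minimal usage sketch for the metric above, mirroring Example 1 from the
# docstring (assumes `datasets` and `nltk` are installed; `load_metric` is the
# entry point this metric script is written for):
#
#   import datasets
#   hyp = ["the", "rubber", "duck", "disobeys"]
#   ref = ["the", "rubber", "duck", "never", "obeys"]
#   google_bleu = datasets.load_metric("google_bleu")
#   results = google_bleu.compute(predictions=[hyp], references=[[ref]])
#   print(results["google_bleu"])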
| 266 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
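# Hypothetical smoke test built on the tester above (requires flax; `None` is
# passed for `parent` since only config/input generation is exercised here):
#
#   tester = FlaxBertModelTester(None)
#   config, input_ids, token_type_ids, attention_mask = tester.prepare_config_and_inputs()
#   model = FlaxBertModel(config)
#   outputs = model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)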
| 356 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
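# What the lazy pattern above buys you (sketch): importing the package module is
# cheap because the heavy torch/flax/tf submodules are only imported on first
# attribute access through the _LazyModule object placed into sys.modules.
#
#   import transformers.models.vision_text_dual_encoder as vtde  # fast, no torch yet
#   cfg_cls = vtde.VisionTextDualEncoderConfig  # first access triggers the real import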
| 94 | 0 |
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
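# Illustrative behaviour of word_break above on two classic word-break cases:
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False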
| 62 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
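# Minimal composition sketch for the config classes above (defaults only; the
# text model falls back to "opt" per the __init__ logic):
#
#   config = InstructBlipConfig()
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size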
| 62 | 1 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Demo function: f(x) = x^3 - 2x - 5."""
    return math.pow(x, 3) - (2 * x) - 5
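# Worked example for the secant iteration above: f(x) = x^3 - 2x - 5 has a real
# root near x ≈ 2.0945515, so intersection(f, 3, 3.5) converges to roughly that
# value (the loop stops once successive iterates differ by less than 1e-5).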
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 240 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
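    # Minimal sketch of driving this builder through the public API (assumes a
    # newline-delimited JSON file on disk):
    #
    #   from datasets import load_dataset
    #   ds = load_dataset("json", data_files="data.jsonl", split="train")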
def lowerCamelCase__ ( self : Tuple , snake_case : Any ) -> Optional[int]:
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCAmelCase : Optional[int] = json.load(snake_case )
# We keep only the field we are interested in
__UpperCAmelCase : int = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(snake_case , (list, tuple) ):
__UpperCAmelCase : Optional[Any] = set().union(*[row.keys() for row in dataset] )
__UpperCAmelCase : Union[str, Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys}
else:
__UpperCAmelCase : Optional[int] = dataset
__UpperCAmelCase : Tuple = pa.Table.from_pydict(snake_case )
yield file_idx, self._cast_table(snake_case )
# If the file has one json object per line
else:
with open(snake_case , '''rb''' ) as f:
__UpperCAmelCase : Optional[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__UpperCAmelCase : int = max(self.config.chunksize // 32 , 16 << 10 )
__UpperCAmelCase : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
__UpperCAmelCase : List[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(snake_case )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__UpperCAmelCase : Union[str, Any] = batch.decode(self.config.encoding , errors=snake_case ).encode('''utf-8''' )
try:
while True:
try:
__UpperCAmelCase : List[str] = paj.read_json(
io.BytesIO(snake_case ) , read_options=paj.ReadOptions(block_size=snake_case ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(snake_case , pa.ArrowInvalid )
and "straddling" not in str(snake_case )
or block_size > len(snake_case )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(snake_case )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__UpperCAmelCase : Optional[Any] = json.load(snake_case )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(snake_case , snake_case ): # list is the only sequence type supported in JSON
try:
__UpperCAmelCase : Dict = set().union(*[row.keys() for row in dataset] )
__UpperCAmelCase : Optional[Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys}
__UpperCAmelCase : Union[str, Any] = pa.Table.from_pydict(snake_case )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(snake_case )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(snake_case )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case )
                        batch_idx += 1
| 240 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
__UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = ['''key_proj''', '''value_proj''', '''query_proj''']
__UpperCAmelCase = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
__UpperCAmelCase = key.split('''.''' )
if attributes[0] == "lm_head":
__UpperCAmelCase = prophet
__UpperCAmelCase = prophet_old
else:
__UpperCAmelCase = prophet.prophetnet
__UpperCAmelCase = prophet_old.model
__UpperCAmelCase = False
for attribute in attributes:
if attribute in mapping:
__UpperCAmelCase = mapping[attribute]
if not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) > 0:
__UpperCAmelCase = attribute
elif hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__UpperCAmelCase = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
__UpperCAmelCase = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__UpperCAmelCase = old_model.bias
logger.info(f'''{attribute} is initialized''' )
__UpperCAmelCase = True
break
elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE , '''in_proj_weight''' ):
__UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
__UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__UpperCAmelCase = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
__UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
__UpperCAmelCase = True
break
if attribute.isdigit():
__UpperCAmelCase = model[int(SCREAMING_SNAKE_CASE )]
__UpperCAmelCase = old_model[int(SCREAMING_SNAKE_CASE )]
else:
__UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if old_attribute == "":
__UpperCAmelCase = old_model
else:
if not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
__UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 333 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two 1-D signals via a rotation matrix."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
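# Expected behaviour of the implementation above: circularly convolving
# [2, 1, 2, -1] with [1, 2, 3, 4] gives [10, 10, 6, 14].
assert CircularConvolution().circular_convolution() == [10, 10, 6, 14]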
| 333 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase_ ( __lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ):
# Load configuration defined in the metadata file
with open(__lowerCamelCase ) as metadata_file:
lowercase_ :Any = json.load(__lowerCamelCase )
lowercase_ :str = LukeConfig(use_entity_aware_attention=__lowerCamelCase ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
lowercase_ :int = torch.load(__lowerCamelCase ,map_location="cpu" )["module"]
# Load the entity vocab file
lowercase_ :int = load_original_entity_vocab(__lowerCamelCase )
# add an entry for [MASK2]
lowercase_ :str = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowercase_ :Tuple = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase_ :Tuple = AddedToken("<ent>" ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase )
lowercase_ :Union[str, Any] = AddedToken("<ent2>" ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,"tokenizer_config.json" ) ,"r" ) as f:
lowercase_ :Optional[int] = json.load(__lowerCamelCase )
lowercase_ :Dict = "MLukeTokenizer"
with open(os.path.join(__lowerCamelCase ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(__lowerCamelCase ,__lowerCamelCase )
with open(os.path.join(__lowerCamelCase ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(__lowerCamelCase ,__lowerCamelCase )
lowercase_ :Any = MLukeTokenizer.from_pretrained(__lowerCamelCase )
# Initialize the embeddings of the special tokens
lowercase_ :Dict = tokenizer.convert_tokens_to_ids(["@"] )[0]
lowercase_ :List[Any] = tokenizer.convert_tokens_to_ids(["#"] )[0]
lowercase_ :Dict = state_dict["embeddings.word_embeddings.weight"]
lowercase_ :Any = word_emb[ent_init_index].unsqueeze(0 )
lowercase_ :Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
lowercase_ :Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowercase_ :Any = state_dict[bias_name]
lowercase_ :Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
lowercase_ :Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
lowercase_ :Union[str, Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase_ :Union[str, Any] = F'encoder.layer.{layer_index}.attention.self.'
lowercase_ :int = state_dict[prefix + matrix_name]
lowercase_ :int = state_dict[prefix + matrix_name]
lowercase_ :List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase_ :Union[str, Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
lowercase_ :Dict = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
lowercase_ :List[str] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowercase_ :int = state_dict["entity_predictions.bias"]
lowercase_ :str = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
lowercase_ :str = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowercase_ :str = LukeForMaskedLM(config=__lowerCamelCase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
lowercase_ :str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
lowercase_ :Optional[int] = state_dict[key]
else:
lowercase_ :str = state_dict[key]
lowercase_ :Optional[Any] = model.load_state_dict(__lowerCamelCase ,strict=__lowerCamelCase )
if set(__lowerCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(__lowerCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowercase_ :Optional[Any] = MLukeTokenizer.from_pretrained(__lowerCamelCase ,task="entity_classification" )
lowercase_ :Optional[Any] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
lowercase_ :Optional[Any] = (0, 9)
lowercase_ :Dict = tokenizer(__lowerCamelCase ,entity_spans=[span] ,return_tensors="pt" )
lowercase_ :Tuple = model(**__lowerCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase_ :Optional[Any] = torch.Size((1, 33, 7_68) )
lowercase_ :Optional[Any] = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__lowerCamelCase ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase_ :List[Any] = torch.Size((1, 1, 7_68) )
lowercase_ :Union[str, Any] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__lowerCamelCase ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowercase_ :List[str] = MLukeTokenizer.from_pretrained(__lowerCamelCase )
lowercase_ :Dict = "Tokyo is the capital of <mask>."
lowercase_ :List[Any] = (24, 30)
lowercase_ :int = tokenizer(__lowerCamelCase ,entity_spans=[span] ,return_tensors="pt" )
lowercase_ :str = model(**__lowerCamelCase )
lowercase_ :List[Any] = encoding["input_ids"][0].tolist()
lowercase_ :Any = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
lowercase_ :int = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__lowerCamelCase )
lowercase_ :int = outputs.entity_logits[0][0].argmax().item()
lowercase_ :Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__lowerCamelCase ) )
model.save_pretrained(__lowerCamelCase )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 371 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported)}')

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported)}')
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
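# Quick sketch of the fallback logic above: with no arguments the config picks
# a Swin backbone and a DETR decoder.
#
#   config = MaskFormerConfig()
#   assert config.backbone_config.model_type == "swin"
#   assert config.decoder_config.model_type == "detr"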
| 147 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 146 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
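# The XOR sign trick above in action: the result is negative exactly when the
# operands' sign bits differ.
assert different_signs(1, -1) is True
assert different_signs(-5, -7) is False
assert different_signs(0, 3) is False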
| 315 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 137 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "encodec"
    def __init__(
        self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1,
        normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32,
        num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7,
        last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True,
        pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024,
        codebook_dim=None, use_conv_shortcut=True, **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
        super().__init__(**kwargs)
@property
    def chunk_length(self) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride(self) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
@property
    def num_quantizers(self) -> int:
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
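

# Added illustration (not in the upstream file): how the derived properties above
# combine with the defaults. The class is transformers' EncodecConfig; since the
# relative imports mean this module only runs inside its package, the example is
# kept as a comment sketch rather than executable module-level code.
#
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.5)
#   config.chunk_length    # int(1.0 * 24_000) -> 24000 samples
#   config.chunk_stride    # max(1, int((1 - 0.5) * 24000)) -> 12000
#   config.frame_rate      # ceil(24000 / prod([8, 5, 4, 2])) = ceil(24000 / 320) -> 75
#   config.num_quantizers  # int(1000 * 24.0 // (75 * 10)) -> 32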
| 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 | 1 |
'''simple docstring'''
from collections.abc import Callable
class a__:
    """A max-heap of (item, score) pairs supporting insert, delete, update and
    extract-top for hashable items."""

    def __init__(self, key=None):
        # Stores heap entries as [item, score] lists.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares two heap entries by score."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns the index of a valid parent as per the desired ordering among
        the given index and both of its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top [item, score] pair from the heap and removes it as well
        if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
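
    # Small demonstration (added; not part of the original module): a max-heap
    # over (item, score) pairs using the default identity key.
    heap = a__()
    heap.insert_item("a", 3)
    heap.insert_item("b", 10)
    heap.insert_item("c", 7)
    print(heap.get_top())      # ['b', 10]
    heap.update_item("b", 1)   # demote "b"; "c" becomes the new top
    print(heap.extract_top())  # ['c', 7]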
| 199 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
set_seed(3 )
# generate train_data and objective_set
_lowercase , _lowercase : List[str] = generate_datasets(
snake_case , snake_case , number=snake_case , min_len=10_26 , trim=snake_case )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_lowercase : int = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
    _lowercase : str = load_gpt2("gpt2" ).to(snake_case )
print("computing perplexity on objective set" )
_lowercase : Dict = compute_perplexity(snake_case , snake_case , snake_case ).item()
print("perplexity on objective set:" , snake_case )
# collect igf pairs and save to file demo.jbl
collect_objective_set(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
    eval_freq=100, igf_model_path="igf_model.pt",
):
set_seed(42 )
# Load pre-trained model
    _lowercase : Tuple = GPT2LMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
_lowercase : Any = SecondaryLearner(snake_case )
# Train secondary learner
_lowercase : Any = train_secondary_learner(
snake_case , snake_case , max_epochs=snake_case , batch_size=snake_case , eval_freq=1_00 , igf_model_path=snake_case , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16,
    threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
_lowercase : str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
_lowercase : int = RandomSampler(snake_case )
_lowercase : int = DataLoader(snake_case , sampler=snake_case )
_lowercase : Tuple = max_steps // (len(snake_case )) + 1
_lowercase : Dict = 0
_lowercase : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=snake_case )
_lowercase , _lowercase , _lowercase : Union[str, Any] = recopy_model(snake_case , snake_case , snake_case )
model.train()
if secondary_learner is not None:
secondary_learner.to(snake_case )
secondary_learner.eval()
_lowercase : Optional[Any] = []
_lowercase : Tuple = 0
_lowercase : int = []
_lowercase : Optional[Any] = []
# Compute the performance of the transformer model at the beginning
_lowercase : Dict = compute_perplexity(snake_case , snake_case , snake_case )
test_perps.append(snake_case )
print("Test perplexity, step" , snake_case , ":" , snake_case )
for epoch in range(int(snake_case ) ):
for step, example in enumerate(snake_case ):
torch.cuda.empty_cache()
_lowercase : Optional[Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
_lowercase : Tuple = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_lowercase : Tuple = model(snake_case , labels=snake_case )
_lowercase : List[Any] = True
if secondary_learner is not None:
_lowercase : Dict = secondary_learner.forward(
torch.tensor(snake_case , dtype=torch.long , device=snake_case ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(snake_case ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
_lowercase : Optional[Any] = -1
if predicted_q < threshold:
_lowercase : List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_lowercase : Dict = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_lowercase : Optional[Any] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_lowercase : Optional[Any] = compute_perplexity(snake_case , snake_case , snake_case )
test_perps.append(snake_case )
print("Test perplexity, step" , snake_case , ":" , snake_case )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , snake_case )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
_lowercase : Optional[Any] = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=snake_case , type=snake_case , required=snake_case , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=snake_case , type=snake_case , required=snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=snake_case , default=snake_case , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=snake_case , default=snake_case , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=snake_case , type=snake_case , required=snake_case , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=snake_case , type=snake_case , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=snake_case , default=snake_case , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=snake_case , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_00 , type=snake_case , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_00 , type=snake_case , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=10_00 , type=snake_case , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_28 , type=snake_case , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=snake_case , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=snake_case , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_00 , type=snake_case , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=10_26 , type=snake_case , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=snake_case , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=snake_case , type=snake_case , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=snake_case , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=snake_case , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=snake_case , type=snake_case , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=snake_case , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
_lowercase : Any = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
_lowercase : Union[str, Any] = training_secondary_learner(
snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
    _lowercase : Optional[Any] = GPT2LMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
_lowercase , _lowercase : Optional[Any] = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=snake_case )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
snake_case , snake_case , snake_case , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=snake_case , secondary_learner=snake_case , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 199 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 218 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None,
        max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
        return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False,
        return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 218 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) | 222 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self, vocab_size=42_384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02,
        layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0,
        activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | 222 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    '''simple docstring'''

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
def __call__(self : Tuple) ->Any:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self):
'''simple docstring'''
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    '''simple docstring'''

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        '''simple docstring'''
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
def __call__(self : Any) ->Dict:
'''simple docstring'''
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        '''simple docstring'''
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"""Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).""")

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}
    def flatten(self):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
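

if __name__ == "__main__":
    # Added self-check with hypothetical data (not part of the upstream datasets
    # source): `__post_init__` sorts the language set, and `encode_example`
    # flattens multi-valued translations into language-sorted tuples.
    feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
    print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
    # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}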
| 10 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_name="sayef/fsner-bert-base-uncased"):
        '''simple docstring'''
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_name, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        '''simple docstring'''
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        '''simple docstring'''
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q, S, T=1):
        '''simple docstring'''
        return self.softmax(T * self.cos(q, S))
    def forward(self, W_query, W_supports):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
| 310 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 48 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 48 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
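
# Added usage note: this module backs the `accelerate config` subcommand, which
# walks through the prompts above and saves the answers, e.g.
#   accelerate config --config_file /path/to/default_config.yaml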
| 318 | import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
    test_set = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
    # `fit_generator` was removed from modern tf.keras; `fit` accepts generators directly.
    classifier.fit(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 343 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
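

if __name__ == "__main__":
    # Added demonstration (not part of the original module): a triangle needs
    # three colors, so 2 colors fail and 3 succeed.
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 2))  # []
    print(color(triangle, 3))  # [0, 1, 2]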
| 365 | import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8",
        unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
        mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
@property
    def vocab_size(self):
return len(self.vocab )
    def get_vocab(self):
return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self, token_ids_0, token_ids_1=None ):
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    _cls = [self.cls_token_id]
    _sep = [self.sep_token_id]
    return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

def snake_case__ ( self, offset_mapping_0, offset_mapping_1=None ):
    if offset_mapping_1 is None:
        return [(0, 0)] + offset_mapping_0 + [(0, 0)]
    return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

def snake_case__ ( self, token_ids_0, token_ids_1=None, already_has_special_tokens=False ):
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError(
                "You should not supply a second sequence if the provided sequence of "
                "ids is already formatted with special tokens for the model.")
        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
    if token_ids_1 is not None:
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1]

def snake_case__ ( self, token_ids_0, token_ids_1=None ):
    # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
    if token_ids_1 is None:
        # [CLS] X [SEP]
        return (len(token_ids_0) + 2) * [0]
    # [CLS] A [SEP] [SEP] B [SEP]
    return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
def snake_case__ ( self, char ):
    if "\u4e00" <= char <= "\u9fff":
        return True
    return False

def snake_case__ ( self, char ):
    if ("a" <= char <= "z") or ("A" <= char <= "Z"):
        return True
    return False

def snake_case__ ( self, char ):
    if char in ",;:.?!~,;:。?!《》【】":
        return True
    return False

def snake_case__ ( self, char ):
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    if len(char) == 1:
        cat = unicodedata.category(char)
        if cat == "Zs":
            return True
    return False

def snake_case__ ( self, filepath ):
    token_to_idx = {}
    with io.open(filepath, """r""", encoding="""utf-8""") as f:
        for index, line in enumerate(f):
            token = line.rstrip("""\n""")
            token_to_idx[token] = int(index)
    return token_to_idx
def snake_case__ ( self, save_directory, filename_prefix=None ):
    index = 0
    if os.path.isdir(save_directory):
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
    else:
        vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
    with open(vocab_file, """w""", encoding="""utf-8""") as writer:
        for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                    """ Please check that the vocabulary is not corrupted!""")
                index = token_index
            writer.write(token + """\n""")
            index += 1
    tokenizer_model_file = os.path.join(save_directory, """sentencepiece.bpe.model""")
    with open(tokenizer_model_file, """wb""") as fi:
        content_spiece_model = self.sp_model.serialized_model_proto()
        fi.write(content_spiece_model)
    return (vocab_file,)
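# Illustrative sketch (my addition, not from the original file): the pair layout
# produced by the methods above is [CLS] A [SEP] [SEP] B [SEP]; the ids below are
# made up for demonstration.
cls_id, sep_id = 1, 2
ids_a, ids_b = [10, 11, 12], [20, 21]
input_ids = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
assert len(input_ids) == len(token_type_ids)
# input_ids      -> [1, 10, 11, 12, 2, 2, 20, 21, 2]
# token_type_ids -> [0, 0, 0, 0, 1, 1, 1, 1, 1]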
| 20 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
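# Illustrative sketch (my addition) of how these markers compose in a test
# module; the test class and body below are hypothetical.
import unittest

from accelerate.test_utils import require_cuda, slow

class ExampleSuite(unittest.TestCase):
    @slow  # only collected when slow tests are enabled
    @require_cuda  # skipped on machines without a CUDA device
    def test_hypothetical_gpu_path(self):
        self.assertTrue(True)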
| 71 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main() -> Any:
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/accelerate')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='closed')
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')
if __name__ == "__main__":
    main()
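# Illustrative sketch (my addition): the close condition above, pulled out as a
# pure predicate so it can be unit-tested without the GitHub API.
def should_close(last_comment_login, days_since_updated, days_since_creation, labels):
    return (
        last_comment_login == "github-actions[bot]"
        and days_since_updated > 7
        and days_since_creation >= 30
        and not any(label.lower() in LABELS_TO_EXEMPT for label in labels)
    )

assert should_close("github-actions[bot]", 8, 30, ["bug"])
assert not should_close("github-actions[bot]", 8, 30, ["WIP"])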
| 71 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    '''simple docstring'''
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
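# Illustrative brute-force cross-check for small tile budgets (my addition;
# enumerates square laminae directly instead of bounding the hole width).
def solution_bruteforce(t_limit: int = 1000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, t_limit):
        for hole_width in range(outer_width - 2, 0, -2):
            tiles = outer_width * outer_width - hole_width * hole_width
            if tiles > t_limit:
                break
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)

assert solution_bruteforce(1000, 10) == solution(1000, 10)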
if __name__ == "__main__":
print(f"""{solution() = }""") | 34 |
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCamelCase_ = logging.getLogger(__name__)
class __lowerCamelCase ( PretrainedConfig ):
    model_type = 'masked_bert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs) -> List[str]:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
 | 34 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def A_ ( *lowercase , **lowercase ):
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Any = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
_lowerCamelCase : Dict = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : int = object_detector(examples[0] , threshold=0.0 )
_lowerCamelCase : str = len(lowercase )
self.assertGreater(lowercase , 0 )
self.assertEqual(
lowercase , [
{
'score': ANY(lowercase ),
'label': ANY(lowercase ),
'box': {'xmin': ANY(lowercase ), 'ymin': ANY(lowercase ), 'xmax': ANY(lowercase ), 'ymax': ANY(lowercase )},
}
for i in range(lowercase )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def A_ ( self ):
pass
@require_torch
def A_ ( self ):
_lowerCamelCase : List[Any] = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
_lowerCamelCase : List[Any] = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
_lowerCamelCase : List[str] = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def A_ ( self ):
_lowerCamelCase : str = pipeline('zero-shot-object-detection' )
_lowerCamelCase : Tuple = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
_lowerCamelCase : Dict = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def A_ ( self ):
pass
@require_torch
@slow
def A_ ( self ):
_lowerCamelCase : Tuple = 0.2
_lowerCamelCase : Any = pipeline('zero-shot-object-detection' )
_lowerCamelCase : List[str] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=lowercase , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def A_ ( self ):
_lowerCamelCase : Optional[int] = 2
_lowerCamelCase : str = pipeline('zero-shot-object-detection' )
_lowerCamelCase : Optional[Any] = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=lowercase , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , ) | 96 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ='''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' )
return image
def a_ ( __snake_case : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =dct.pop(__snake_case )
lowerCamelCase_ =val
def a_ ( __snake_case : str , __snake_case : Optional[Any] ) -> Any:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowerCamelCase_ =state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
lowerCamelCase_ =torch.cat((q_bias, torch.zeros_like(__snake_case , requires_grad=__snake_case ), v_bias) )
lowerCamelCase_ =qkv_bias
def a_ ( __snake_case : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =364 if '''coco''' in model_name else 224
lowerCamelCase_ =InstructBlipVisionConfig(image_size=__snake_case ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowerCamelCase_ =TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
lowerCamelCase_ =LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowerCamelCase_ =InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
lowerCamelCase_ =InstructBlipConfig(vision_config=__snake_case , text_config=__snake_case , qformer_config=__snake_case )
return config, image_size
@torch.no_grad()
def a_ ( __snake_case : Optional[int] , __snake_case : str=None , __snake_case : List[Any]=False ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
lowerCamelCase_ =TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowerCamelCase_ =LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
lowerCamelCase_, lowerCamelCase_ =get_blipa_config(__snake_case )
lowerCamelCase_ =InstructBlipForConditionalGeneration(__snake_case ).eval()
lowerCamelCase_ ={
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
lowerCamelCase_, lowerCamelCase_ =model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
lowerCamelCase_ ='''cuda:1''' if torch.cuda.is_available() else '''cpu'''
lowerCamelCase_ ='''cuda:2''' if torch.cuda.is_available() else '''cpu'''
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =load_model_and_preprocess(
name=__snake_case , model_type=__snake_case , is_eval=__snake_case , device=__snake_case )
original_model.eval()
print('''Done!''' )
# update state dict keys
lowerCamelCase_ =original_model.state_dict()
lowerCamelCase_ =create_rename_keys(__snake_case )
for src, dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCamelCase_ =state_dict.pop(__snake_case )
if key.startswith('''Qformer.bert''' ):
lowerCamelCase_ =key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
lowerCamelCase_ =key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
lowerCamelCase_ =key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
lowerCamelCase_ =key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
lowerCamelCase_ =key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
lowerCamelCase_ =key.replace('''t5''' , '''language''' )
lowerCamelCase_ =val
# read in qv biases
read_in_q_v_bias(__snake_case , __snake_case )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__snake_case , strict=__snake_case )
lowerCamelCase_ =load_demo_image()
lowerCamelCase_ ='''What is unusual about this image?'''
# create processor
lowerCamelCase_ =BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__snake_case , image_std=__snake_case )
lowerCamelCase_ =InstructBlipProcessor(
image_processor=__snake_case , tokenizer=__snake_case , qformer_tokenizer=__snake_case , )
lowerCamelCase_ =processor(images=__snake_case , text=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# make sure processor creates exact same pixel values
lowerCamelCase_ =vis_processors['''eval'''](__snake_case ).unsqueeze(0 ).to(__snake_case )
lowerCamelCase_ =inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __snake_case )
original_model.to(__snake_case )
hf_model.to(__snake_case )
with torch.no_grad():
if "vicuna" in model_name:
lowerCamelCase_ =original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
lowerCamelCase_ =hf_model(**__snake_case ).logits
else:
lowerCamelCase_ =original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
lowerCamelCase_ =tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(__snake_case )
lowerCamelCase_ =label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowerCamelCase_ =hf_model(**__snake_case , labels=__snake_case ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowerCamelCase_ =1e-4 if '''vicuna''' in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , __snake_case , atol=__snake_case )
print('''Looks ok!''' )
print('''Generating with original model...''' )
lowerCamelCase_ =original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
lowerCamelCase_ =hf_model.generate(
**__snake_case , do_sample=__snake_case , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowerCamelCase_ =2
print('''Original generation:''' , __snake_case )
lowerCamelCase_ =processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )
lowerCamelCase_ =[text.strip() for text in output_text]
print('''HF generation:''' , __snake_case )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__snake_case )
hf_model.save_pretrained(__snake_case )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
a_ : Any = [
"""instructblip-vicuna-7b""",
"""instructblip-vicuna-13b""",
"""instructblip-flan-t5-xl""",
"""instructblip-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""instructblip-flan-t5-xl""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
a_ : str = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
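# Illustrative shape-only sketch (my addition): the qkv bias read in above is
# rebuilt by padding a zero key bias between q and v, since the checkpoint
# stores no key bias. The size below is made up.
import torch

hidden_size = 8
q_bias = torch.randn(hidden_size)
v_bias = torch.randn(hidden_size)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)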
| 75 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 270 |
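# Illustrative standalone sketch (my addition) of the deferred-import pattern
# _LazyModule implements above; simplified, not the actual transformers code.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value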
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
__lowerCAmelCase = logging.getLogger(__name__)
class _lowerCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-1_2,
                 pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs) -> int:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
 | 270 | 1 |
# Hand-tuned inference timestep schedules, one constant per schedule so that no
# list shadows another (the fast/smart/super names are descriptive labels keyed
# to each schedule's length).
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]
smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]
smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
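# Illustrative sanity check (my addition): every schedule should be strictly
# decreasing and terminate at timestep 0.
def _check_schedule(timesteps):
    assert timesteps[-1] == 0
    assert all(a > b for a, b in zip(timesteps, timesteps[1:]))

for _schedule in (fast27_timesteps, smart27_timesteps, smart50_timesteps,
                  smart100_timesteps, smart185_timesteps, super27_timesteps,
                  super40_timesteps, super100_timesteps):
    _check_schedule(_schedule)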
| 214 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
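# Illustrative sketch (my addition): the pixel comparison used above boils down
# to a mean absolute error between uint8 images; the threshold here is assumed.
def _mean_pixel_difference(image_a, image_b):
    a = np.asarray(image_a, dtype=np.float32)
    b = np.asarray(image_b, dtype=np.float32)
    return np.abs(a - b).mean()

_a = np.zeros((4, 4, 3), dtype=np.uint8)
_b = np.full((4, 4, 3), 2, dtype=np.uint8)
assert _mean_pixel_difference(_a, _b) < 10  # assumed budget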
| 44 | 0 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    '''simple docstring'''
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    '''simple docstring'''
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    '''simple docstring'''
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
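    # Illustrative cross-check (my addition): the backtracking generator should
    # agree with itertools.combinations over 1..n.
    from itertools import combinations

    assert generate_all_combinations(n, k) == [list(c) for c in combinations(range(1, n + 1), k)]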
| 277 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    '''simple docstring'''
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    '''simple docstring'''
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    '''simple docstring'''
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class lowercase ( AccelerateTestCase ):
"""simple docstring"""
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_= Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_= GradientState()
assert state.num_steps == 1
UpperCAmelCase_= 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase_= False
assert state.sync_gradients is False
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
UpperCAmelCase_= Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
(
    prepared_model,
    prepared_optimizer,
    prepared_scheduler,
    prepared_train_dl,
    prepared_valid_dl,
) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__UpperCAmelCase : Dict , **__UpperCAmelCase : Tuple ):
pass
with patch("""torch.cuda.set_device""" , __UpperCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
UpperCAmelCase_= Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= get_signature(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= get_signature(__UpperCAmelCase )
# saving hook
def save_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ):
UpperCAmelCase_= {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """w""" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# loading hook
def load_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """r""" ) as f:
UpperCAmelCase_= json.load(__UpperCAmelCase )
UpperCAmelCase_= config["""class_name"""]
UpperCAmelCase_= accelerator.register_save_state_pre_hook(__UpperCAmelCase )
UpperCAmelCase_= accelerator.register_load_state_pre_hook(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_= """random"""
# make sure loaded weights match with hooks
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_= """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
UpperCAmelCase_= None
# This should work
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(dummy_obj is None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
UpperCAmelCase_= [1, 2, 3]
# This should work
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map={"""""": 0} , )
UpperCAmelCase_= Accelerator()
# This should work
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= Accelerator()
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= """cpu"""
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase )
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
from transformers import AutoModelForCausalLM
UpperCAmelCase_= {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
UpperCAmelCase_= Accelerator()
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase )
UpperCAmelCase_= 1
UpperCAmelCase_= AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
UpperCAmelCase_= Accelerator()
# This should work
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_= torch.nn.Linear(10 , 10 )
UpperCAmelCase_= torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase )
UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
| 277 | 1 |
import math
def prime_sieve(n):
    """simple docstring"""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit=999_966_663_333):
    """simple docstring"""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
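# De-obfuscated sketch of the sieve helper above: the bare `lowercase__ = ...`
# assignments hide the intended `is_prime` / `index` targets. Same logic,
# readable names:
def prime_sieve(n: int) -> list[int]:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):  # only odd candidates matter
        index = i * 2
        while index < n:
            is_prime[index] = False  # strike every multiple of i
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes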
| 130 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_UpperCAmelCase = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_UpperCAmelCase = re.compile(r"""([a-z\d])([A-Z])""")
_UpperCAmelCase = re.compile(r"""(?<!_)_(?!_)""")
_UpperCAmelCase = re.compile(r"""(_{2,})""")
_UpperCAmelCase = r"""^\w+(\.\w+)*$"""
_UpperCAmelCase = r"""<>:/\|?*"""
def __magic_name__ ( lowercase ):
name = _uppercase_uppercase_re.sub(R"""\1_\2""" , lowercase )
name = _lowercase_uppercase_re.sub(R"""\1_\2""" , name )
return name.lower()
def __magic_name__ ( lowercase ):
name = _single_underscore_re.split(lowercase )
name = [_multiple_underscores_re.split(n ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != """""" )
def __magic_name__ ( lowercase ):
if os.path.basename(lowercase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if os.path.basename(lowercase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , lowercase ):
raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(lowercase )}-{split}'''
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None ):
SCREAMING_SNAKE_CASE_: List[Any] =filename_prefix_for_split(lowercase , lowercase )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , lowercase )
return f'''{filepath}*'''
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None ):
SCREAMING_SNAKE_CASE_: List[Any] =filename_prefix_for_split(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: int =os.path.join(lowercase , lowercase )
if shard_lengths:
SCREAMING_SNAKE_CASE_: Any =len(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =[f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowercase )]
if filetype_suffix:
SCREAMING_SNAKE_CASE_: Optional[int] =[filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
SCREAMING_SNAKE_CASE_: List[Any] =prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
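# Quick demonstration of the naming helpers above. The obfuscated
# `__magic_name__` defs all shadow one another, so the names used here (and the
# argument order of the last call) are assumed from `datasets.naming`:
print(camelcase_to_snakecase("SquadV2"))               # squad_v2
print(filename_prefix_for_split("squad_v2", "train"))  # squad_v2-train
print(filepattern_for_dataset_split("squad_v2", "train", "/data", "arrow"))
# /data/squad_v2-train.arrow*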
| 173 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
__magic_name__ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
__magic_name__ = logging.WARNING
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = os.getenv('''DATASETS_VERBOSITY''' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def _lowerCAmelCase ( ):
'''simple docstring'''
return __name__.split('''.''' )[0]
def _lowerCAmelCase ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def _lowerCAmelCase ( ):
'''simple docstring'''
library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def _lowerCAmelCase ( ):
'''simple docstring'''
library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def _lowerCAmelCase ( A__: Optional[str] = None ):
'''simple docstring'''
if A__ is None:
A__ = _get_library_name()
return logging.getLogger(A__ )
def _lowerCAmelCase ( ):
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def _lowerCAmelCase ( A__: int ):
'''simple docstring'''
_get_library_root_logger().setLevel(A__ )
def _lowerCAmelCase ( ): # set_verbosity_info
'''simple docstring'''
return set_verbosity(INFO )
def _lowerCAmelCase ( ): # set_verbosity_warning
'''simple docstring'''
return set_verbosity(WARNING )
def _lowerCAmelCase ( ): # set_verbosity_debug
'''simple docstring'''
return set_verbosity(DEBUG )
def _lowerCAmelCase ( ): # set_verbosity_error
'''simple docstring'''
return set_verbosity(ERROR )
def _lowerCAmelCase ( ): # disable_propagation
'''simple docstring'''
_get_library_root_logger().propagate = False
def _lowerCAmelCase ( ): # enable_propagation
'''simple docstring'''
_get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowercase :
'''simple docstring'''
def __init__( self , *_snake_case , **_snake_case ) -> Dict: # pylint: disable=unused-argument
"""simple docstring"""
self._iterator = args[0] if args else None
def __iter__( self ) -> str:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , _snake_case ) -> Tuple:
"""simple docstring"""
def empty_fn(*_snake_case , **_snake_case ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Union[str, Any]:
"""simple docstring"""
return self
def __exit__( self , _snake_case , _snake_case , _snake_case ) -> str:
"""simple docstring"""
return
__magic_name__ = True
class lowercase :
'''simple docstring'''
def __call__( self , *_snake_case , _snake_case=False , **_snake_case ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_snake_case , **_snake_case )
else:
return EmptyTqdm(*_snake_case , **_snake_case )
def snake_case_ ( self , *_snake_case , **_snake_case ) -> Tuple:
"""simple docstring"""
self._lock = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_snake_case , **_snake_case )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__magic_name__ = _tqdm_cls()
def _lowerCAmelCase ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def _lowerCAmelCase ( ): # enable_progress_bar
'''simple docstring'''
global _tqdm_active
_tqdm_active = True
def _lowerCAmelCase ( ): # disable_progress_bar
'''simple docstring'''
global _tqdm_active
_tqdm_active = False
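# How this module is driven in practice, assuming the public names it is
# obfuscated from (`datasets.utils.logging`); a usage sketch, not verbatim API:
import datasets

datasets.logging.set_verbosity_debug()    # root logger down to DEBUG
print(datasets.logging.get_verbosity())   # 10 == logging.DEBUG
datasets.logging.disable_progress_bar()   # tqdm(...) above now returns EmptyTqdm
datasets.logging.set_verbosity_error()    # back to errors only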
| 361 |
def _lowerCAmelCase ( A__: int = 1000 ):
'''simple docstring'''
num = 2**A__
string_num = str(num )
list_num = list(string_num )
sum_of_num = 0
for i in list_num:
sum_of_num += int(i )
return sum_of_num
if __name__ == "__main__":
__magic_name__ = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
__magic_name__ = solution(power)
print("Sum of the digits is: ", result)
| 152 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_encodec'''] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
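# With the lazy table wired up, the symbols resolve on first attribute access.
# A usage sketch, assuming this file is transformers/models/encodec/__init__.py:
from transformers import EncodecConfig, EncodecFeatureExtractor  # no torch required
from transformers import EncodecModel  # imported lazily; needs torch installed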
| 53 |
"""simple docstring"""
def UpperCamelCase ( numerator : int = 3, denominator : int = 7, limit : int = 1000000 ) -> int:
max_numerator = 0
max_denominator = 1
for current_denominator in range(1, limit + 1 ):
current_numerator = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
max_numerator = current_numerator
max_denominator = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
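# Worked example from the Project Euler 71 statement (`solution` is the name
# the __main__ block above already uses for the def): for denominators up to 8,
# the fraction immediately to the left of 3/7 is 2/5, so the numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2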
| 246 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 206 | def __lowerCamelCase (string : str , separator : str = " " ):
split_words = []
last_index = 0
for index, char in enumerate(string ):
if char == separator:
split_words.append(string[last_index:index] )
last_index = index + 1
elif index + 1 == len(string ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
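# Edge-case check for the splitter above: a trailing separator yields no empty
# tail, unlike str.split with an explicit separator.
assert __lowerCamelCase(" a b ") == ["", "a", "b"]
assert " a b ".split(" ") == ["", "a", "b", ""]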
| 206 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( input_string , key ): # encrypt
temp_grid: list[list[str]] = [[] for _ in range(key )]
lowest = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1 or len(input_string ) <= key:
return input_string
for position, character in enumerate(input_string ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(character )
grid = [''''''.join(row ) for row in temp_grid]
output_string = ''''''.join(grid )
return output_string
def __SCREAMING_SNAKE_CASE ( input_string , key ): # decrypt
grid = []
lowest = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1:
return input_string
temp_grid: list[list[str]] = [[] for _ in range(key )] # generates template
for position in range(len(input_string ) ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('''*''' )
counter = 0
for row in temp_grid: # fills in the characters
splice = input_string[counter : counter + len(row )]
grid.append(list(splice ) )
counter += len(splice )
output_string = '''''' # reads as zigzag
for position in range(len(input_string ) ):
num = position % (lowest * 2) # puts it in bounds
num = min(num , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __SCREAMING_SNAKE_CASE ( input_string ): # bruteforce
results = {}
for key_guess in range(1 , len(input_string ) ): # tries every key
results[key_guess] = decrypt(input_string , key_guess )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
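# Round trip with the classic rail-fence example. The three defs above all
# share the obfuscated name `__SCREAMING_SNAKE_CASE`, so the intended public
# names `encrypt` / `decrypt` / `bruteforce` are assumed here:
ciphertext = encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
print(ciphertext)                 # WECRLTEERDSOEEFEAOCAIVDEN
print(decrypt(ciphertext, 3))     # WEAREDISCOVEREDFLEEATONCE
print(bruteforce(ciphertext)[3])  # same plaintext, recovered without the key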
| 106 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase__ : int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
UpperCamelCase__ : Any = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
UpperCamelCase__ : Optional[Any] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
A_ : Optional[Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : List[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : int = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
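# Usage mirroring the metric's own docstring example:
import datasets

squad_metric = datasets.load_metric("squad")
predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
print(squad_metric.compute(predictions=predictions, references=references))
# {'exact_match': 100.0, 'f1': 100.0}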
| 344 | 0 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
def __init__( self: Any ):
self.node_position = []
def SCREAMING_SNAKE_CASE ( self: Tuple , vertex ): # get_position
return self.node_position[vertex]
def SCREAMING_SNAKE_CASE ( self: Dict , vertex , pos ): # set_position
self.node_position[vertex] = pos
def SCREAMING_SNAKE_CASE ( self: Any , heap , start , size , positions ): # top_to_bottom
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
smallest_child = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
smallest_child = 2 * start + 1
else:
smallest_child = 2 * start + 2
if heap[smallest_child] < heap[start]:
temp, tempa = heap[smallest_child], positions[smallest_child]
heap[smallest_child], positions[smallest_child] = (
heap[start],
positions[start],
)
heap[start], positions[start] = temp, tempa
temp = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , temp )
self.top_to_bottom(heap , smallest_child , size , positions )
def SCREAMING_SNAKE_CASE ( self: str , val , index , heap , position ): # bottom_to_top
temp = position[index]
while index != 0:
parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
heap[index] = heap[parent]
position[index] = position[parent]
self.set_position(position[parent] , index )
else:
heap[index] = val
position[index] = temp
self.set_position(temp , index )
break
index = parent
else:
heap[0] = val
position[0] = temp
self.set_position(temp , 0 )
def SCREAMING_SNAKE_CASE ( self: int , heap , positions ): # heapify
start = len(heap ) // 2 - 1
for i in range(start , -1 , -1 ):
self.top_to_bottom(heap , i , len(heap ) , positions )
def SCREAMING_SNAKE_CASE ( self: Dict , heap , positions ): # delete_minimum
temp = positions[0]
heap[0] = sys.maxsize
self.top_to_bottom(heap , 0 , len(heap ) , positions )
return temp
def UpperCAmelCase__ ( adjacency_list ):
heap = Heap()
visited = [0] * len(adjacency_list )
nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
positions = []
for vertex in range(len(adjacency_list ) ):
distance_tv.append(sys.maxsize )
positions.append(vertex )
heap.node_position.append(vertex )
tree_edges = []
visited[0] = 1
distance_tv[0] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
nbr_tv[neighbor] = 0
distance_tv[neighbor] = distance
heap.heapify(distance_tv, positions )
for _ in range(1, len(adjacency_list ) ):
vertex = heap.delete_minimum(distance_tv, positions )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
visited[vertex] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(neighbor )]
):
distance_tv[heap.get_position(neighbor )] = distance
heap.bottom_to_top(
distance, heap.get_position(neighbor ), distance_tv, positions )
nbr_tv[neighbor] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_UpperCAmelCase : Dict = int(input("Enter number of edges: ").strip())
_UpperCAmelCase : List[str] = defaultdict(list)
for _ in range(edges_number):
_UpperCAmelCase : Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
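# A hedged usage sketch; the function name is taken from the print() call
# above (`prisms_algorithm`), while the def keeps its obfuscated name. For the
# triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), Prim's keeps the two cheapest edges:
example = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
print(prisms_algorithm(example))  # [(0, 1), (1, 2)]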
| 368 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: Any , dataset , process , params ):
self.dataset = dataset
self.process = process
self.params = params
def __len__( self: str ):
return len(self.dataset )
def __getitem__( self: int , i ):
item = self.dataset[i]
processed = self.process(item , **self.params )
return processed
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: int , loader , infer , params , loader_batch_size=None ):
self.loader = loader
self.infer = infer
self.params = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
loader_batch_size = None
self.loader_batch_size = loader_batch_size
# Internal bookkeeping
self._loader_batch_index = None
self._loader_batch_data = None
def __len__( self: Tuple ):
return len(self.loader )
def __iter__( self: List[str] ):
self.iterator = iter(self.loader )
return self
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
result = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
loader_batched = {}
for k, element in self._loader_batch_data.items():
if isinstance(element , ModelOutput ):
# Convert ModelOutput to tuple first
element = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
loader_batched[k] = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
loader_batched[k] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
result = self._loader_batch_data.__class__(loader_batched )
self._loader_batch_index += 1
return result
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
item = next(self.iterator )
processed = self.infer(item , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(processed , torch.Tensor ):
first_tensor = processed
else:
key = list(processed.keys() )[0]
first_tensor = processed[key]
if isinstance(first_tensor , list ):
observed_batch_size = len(first_tensor )
else:
observed_batch_size = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
self.loader_batch_size = observed_batch_size
# Setting internal index to unwrap the batch
self._loader_batch_data = processed
self._loader_batch_index = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: Union[str, Any] , loader , infer , params , loader_batch_size=None ):
super().__init__(loader , infer , params )
def __iter__( self: Tuple ):
self.iterator = iter(self.loader )
self.subiterator = None
return self
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
if self.subiterator is None:
self.subiterator = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
processed = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it is that we're basically flattening lists of lists
# into a single list, but with generators
self.subiterator = self.infer(next(self.iterator ) , **self.params )
processed = next(self.subiterator )
return processed
class __lowerCAmelCase ( lowerCAmelCase):
def __iter__( self: str ):
self.iterator = iter(self.loader )
return self
def SCREAMING_SNAKE_CASE ( self: str ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator`, so we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
is_last = False
accumulator = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
item = self.loader_batch_item()
is_last = item.pop("is_last" )
accumulator.append(item )
if is_last:
return accumulator
while not is_last:
processed = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(processed , torch.Tensor ):
first_tensor = processed
else:
key = list(processed.keys() )[0]
first_tensor = processed[key]
if isinstance(first_tensor , list ):
observed_batch_size = len(first_tensor )
else:
observed_batch_size = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
self.loader_batch_size = observed_batch_size
self._loader_batch_data = processed
self._loader_batch_index = 0
while self._loader_batch_index < self.loader_batch_size:
item = self.loader_batch_item()
is_last = item.pop("is_last" )
accumulator.append(item )
if is_last:
return accumulator
else:
item = processed
is_last = item.pop("is_last" )
accumulator.append(item )
return accumulator
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: Union[str, Any] , dataset: Dataset , key: str ):
self.dataset = dataset
self.key = key
def __len__( self: Any ):
return len(self.dataset )
def __getitem__( self: int , i: int ):
return self.dataset[i][self.key]
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: List[Any] , _lowerCAmelCase: Dataset , _lowerCAmelCase: str , _lowerCAmelCase: str ):
lowercase :Union[str, Any] = dataset
lowercase :Optional[int] = keya
lowercase :str = keya
def __len__( self: Optional[Any] ):
return len(self.dataset )
def __getitem__( self: Optional[Any] , _lowerCAmelCase: int ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 158 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( a , b ): # multiply
"""simple docstring"""
res = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _SCREAMING_SNAKE_CASE ( a , b , c ): # multiply mod c
"""simple docstring"""
res = 0
while b > 0:
if b & 1:
res = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
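# Quick checks for the shift-and-add (Russian peasant) helpers above. Both
# defs share the obfuscated name `_SCREAMING_SNAKE_CASE`, so `multiply` and
# `multiply_mod` here are assumed names:
assert multiply(13, 11) == 143
assert multiply_mod(13, 11, 7) == 143 % 7  # == 3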
| 37 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str:
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Optional[int] = is_training
lowerCAmelCase__ : Dict = use_auxiliary_loss
lowerCAmelCase__ : Union[str, Any] = num_queries
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : List[str] = min_size
lowerCAmelCase__ : int = max_size
lowerCAmelCase__ : Optional[Any] = num_labels
lowerCAmelCase__ : List[Any] = mask_feature_size
def UpperCAmelCase_ ( self ) -> Tuple:
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
torch_device )
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=torch_device )
mask_labels = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=torch_device ) > 0.5
).float()
class_labels = (torch.rand((self.batch_size, self.num_labels) ,device=torch_device ) > 0.5).long()
config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase_ ( self ) -> Dict:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)
def UpperCAmelCase_ ( self ) -> Optional[int]:
config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states
lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states
lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]:
with torch.no_grad():
lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
def comm_check_on_output(__UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
lowerCAmelCase__ : Dict = model(__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model(
pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
comm_check_on_output(__UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__lowercase : int = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__lowercase : Union[str, Any] = False
__lowercase : Dict = False
__lowercase : Tuple = False
__lowercase : List[Any] = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.model_tester = MaskFormerModelTester(self )
self.config_tester = ConfigTester(self ,config_class=MaskFormerConfig ,has_text_modality=False )
def UpperCAmelCase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def UpperCAmelCase_ ( self ) -> str:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def UpperCAmelCase_ ( self ) -> Any:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def UpperCAmelCase_ ( self ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase_ ( self ) -> List[str]:
pass
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(__UpperCAmelCase )
lowerCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Dict = [*signature.parameters.keys()]
lowerCAmelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,__UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2
lowerCAmelCase__ : Any = {
"""pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ),
"""class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(),
}
lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self ) -> int:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ : Dict = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase_ ( self ) -> List[str]:
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase__ : Tuple = self.all_model_classes[1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase__ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCAmelCase = 1e-4
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ) -> List[Any]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : str = self.default_image_processor
lowerCAmelCase__ : str = prepare_img()
lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
lowerCAmelCase__ : Dict = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[int] = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : List[str] = prepare_img()
lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase )
# masks_queries_logits
lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowerCAmelCase__ : Optional[int] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
# class_queries_logits
lowerCAmelCase__ : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : int = prepare_img()
lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
lowerCAmelCase__ : str = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
with torch.no_grad():
lowerCAmelCase__ : str = model(**__UpperCAmelCase )
# masks_queries_logits
lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
# class_queries_logits
lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase__ : Tuple = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : str = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__UpperCAmelCase )
.eval()
)
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : Union[str, Any] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]]
lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowerCAmelCase__ : Any = model(**__UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 37 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 25_60_47
RO_CODE = 25_61_45
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def _a (self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = NllbTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ):
"""simple docstring"""
tokenizer = NllbTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase )
UpperCAmelCase__ : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : int = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : Any = tempfile.mkdtemp()
UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : str = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : str = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : int = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase )
UpperCAmelCase__ : Any = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saves with the same files
self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Tuple = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Dict = tempfile.mkdtemp()
UpperCAmelCase__ : int = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase )
UpperCAmelCase__ : Any = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Any = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
@require_torch
def _a (self ):
"""simple docstring"""
if not self.test_seqaseq:
return
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
UpperCAmelCase__ : Tuple = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
UpperCAmelCase__ : Union[str, Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
UpperCAmelCase__ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCamelCase , tgt_texts=_lowerCamelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCAmelCase__ : Optional[int] = tokenizer.prepare_seqaseq_batch(
_lowerCamelCase , tgt_texts=_lowerCamelCase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCAmelCase__ : Any = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCamelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , _lowerCamelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : Optional[int] = [AddedToken("""<special>""" , lstrip=_lowerCamelCase )]
UpperCAmelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , additional_special_tokens=_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : int = tokenizer_r.encode("""Hey this is a <special> token""" )
UpperCAmelCase__ : int = tokenizer_r.encode("""<special>""" , add_special_tokens=_lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , additional_special_tokens=_lowerCamelCase , **_lowerCamelCase , )
UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained(
_lowerCamelCase , additional_special_tokens=_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : str = tokenizer_p.encode("""Hey this is a <special> token""" )
UpperCAmelCase__ : List[Any] = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = """facebook/nllb-200-distilled-600M"""
SCREAMING_SNAKE_CASE = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
SCREAMING_SNAKE_CASE = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
SCREAMING_SNAKE_CASE = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def _a (cls ):
"""simple docstring"""
UpperCAmelCase__ : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
UpperCAmelCase__ : int = 1
return cls
def _a (self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256057 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
self.assertIn(_lowerCamelCase , self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase__ : Tuple = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
UpperCAmelCase__ : Optional[Any] = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Any = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , _lowerCamelCase )
UpperCAmelCase__ : str = 10
UpperCAmelCase__ : str = self.tokenizer(_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [256203, 3] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCamelCase )
UpperCAmelCase__ : List[str] = NllbTokenizer.from_pretrained(_lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCamelCase )
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase__ : int = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCAmelCase__ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowerCamelCase )
self.assertEqual(_lowerCamelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.tokenizer(self.src_text , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase__ : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase__ : List[str] = targets["""input_ids"""]
UpperCAmelCase__ : List[Any] = shift_tokens_right(
_lowerCamelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
                # eng_Latn, A, test, EOS
"""input_ids""": [[256047, 70, 7356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
                # fra_Latn
"""forced_bos_token_id""": 256057,
} , )
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Any = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : str = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
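
# The two assertions above pin down how NLLB places the source-language tag.
# A minimal standalone sketch of that placement (illustrative ids only, not
# the real transformers implementation):
def add_lang_markers(token_ids, lang_code_id, eos_id=2, legacy_behaviour=False):
    if legacy_behaviour:
        # legacy: tokens + [eos, lang_code]
        return token_ids + [eos_id, lang_code_id]
    # default: [lang_code] + tokens + [eos]
    return [lang_code_id] + token_ids + [eos_id]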
| 356 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=2 , ):
"""simple docstring"""
UpperCAmelCase__ : Dict = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Tuple = patch_size
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Optional[int] = use_labels
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : str = scope
UpperCAmelCase__ : Optional[Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ : int = (image_size // patch_size) ** 2
UpperCAmelCase__ : Tuple = num_patches + 2
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : int = self.get_config()
return config, pixel_values, labels
def _a (self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = DeiTModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = DeiTForMaskedImageModeling(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ : str = 1
UpperCAmelCase__ : List[str] = DeiTForMaskedImageModeling(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.type_sequence_label_size
UpperCAmelCase__ : List[str] = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : str = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : int = DeiTForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = DeiTModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def _a (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_lowerCamelCase )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a (self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCamelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase__ : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : int = model(**_lowerCamelCase ).loss
loss.backward()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : Tuple = model(**_lowerCamelCase ).loss
loss.backward()
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
UpperCAmelCase__ : List[str] = problem_type["""title"""]
UpperCAmelCase__ : List[Any] = problem_type["""num_labels"""]
UpperCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
if problem_type["num_labels"] > 1:
UpperCAmelCase__ : Optional[int] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
UpperCAmelCase__ : str = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCamelCase ) as warning_list:
UpperCAmelCase__ : Any = model(**_lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _a (self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : int = DeiTModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def a__ ( ) -> int:
UpperCAmelCase__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a (self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : Tuple = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_lowerCamelCase , return_tensors="""pt""" ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Any = model(**_lowerCamelCase )
# verify the logits
UpperCAmelCase__ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCAmelCase__ : Dict = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : int = prepare_img()
UpperCAmelCase__ : str = image_processor(images=_lowerCamelCase , return_tensors="""pt""" )
UpperCAmelCase__ : Dict = inputs.pixel_values.to(_lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase__ : int = model(_lowerCamelCase )
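
# Quick check of the sequence-length rule used by the model tester above:
# DeiT prepends a [CLS] and a distillation token to the patch sequence.
image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2  # 196 patches
assert num_patches + 2 == 198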
| 166 | 0 |
def actual_power(a: int, b: int):
    # Exponentiation by squaring; int(b / 2) truncates toward zero, so the
    # recursion also terminates for negative b.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
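
# The recursion above calls actual_power(a, b // 2) twice per level, so it
# still performs O(|b|) multiplications. A sketch of the usual O(log |b|)
# iterative variant (fast_power is my own name, not from the snippet):
def fast_power(a: float, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    result = 1.0
    while b:
        if b & 1:  # multiply in the current bit of the exponent
            result *= a
        a *= a
        b >>= 1
    return result


assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125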
| 149 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 149 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
A: int = SMALL_MODEL_IDENTIFIER
A: List[str] = '''pt'''
A: Optional[Any] = '''tf'''
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
'''simple docstring'''
A: Any = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A: Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=SCREAMING_SNAKE_CASE_ )
model_tf.save_pretrained(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A: List[str] = '''mock_framework'''
# Framework provided - return whatever the user provides
A: Tuple = FeaturesManager.determine_framework(self.test_model , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
A: List[str] = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Any ) -> str:
'''simple docstring'''
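        # PyTorch checkpoint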
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
A: int = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
A: Dict = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
A: Optional[int] = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : str ) -> int:
'''simple docstring'''
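        # TensorFlow not in environment -> use PyTorch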
A: Any = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ):
A: Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
A: Dict = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
A: List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
# Both in environment -> use PyTorch
A: Dict = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
A: Any = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ), patch(
'''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
A: Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# Both not in environment -> raise error
A: List[Any] = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
A: Any = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ), patch(
'''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
A: Dict = FeaturesManager.determine_framework(self.test_model )
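
# A simplified model of the priority the tests above exercise (my own
# condensation, not the transformers source): an explicit framework wins,
# then local checkpoint contents, then installed backends with torch first.
import os


def pick_framework(model_dir, framework=None, torch_ok=True, tf_ok=True):
    if framework is not None:
        return framework
    if os.path.isdir(model_dir):
        if os.path.exists(os.path.join(model_dir, "pytorch_model.bin")):
            return "pt"
        if os.path.exists(os.path.join(model_dir, "tf_model.h5")):
            return "tf"
        raise FileNotFoundError("no framework-specific weights found")
    if torch_ok:
        return "pt"
    if tf_ok:
        return "tf"
    raise EnvironmentError("neither PyTorch nor TensorFlow is available")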
| 367 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two binary strings that differ in exactly one position into a
    # pattern with '_' at that position; return False if they differ in more.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    # Keep merging mergeable pairs; implicants that survive a round unmerged
    # (still marked '$') are prime implicants.
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    # True when the pattern string1 covers string2, i.e. they differ only at
    # the pattern's '_' positions (exactly `count` of them).
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining columns with the row covering the most
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    # minterms are integers; reading them as floats breaks decimal_to_binary,
    # since str(1.0 % 2) is '1.0' rather than '1'
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
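
# Worked example for the helpers above: minimizing f(a, b, c) over the
# minterm set {0, 1, 2, 5} (order of the printed implicants may vary).
def demo_quine_mccluskey() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
    pi = check(binary)                           # e.g. ['00_', '0_0', '_01']
    chart = prime_implicant_chart(pi, binary)
    print(selection(chart, pi))                  # the essential cover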
| 334 | 0 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # collapse multiple sources/sinks into a single fake source and sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # override this in concrete algorithms
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
| 52 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
    ]

    functions_shuffled = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''')
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''')), '''numbers''': datasets.Value('''float32''')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'''list''': (100,)})
        print('''first set of iterations''')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print('''shuffling dataset''')
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''')
        for func, kwargs in functions_shuffled:
            print('''shuffled ''', func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, '''wb''') as f:
        f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
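
# `get_duration` is imported from the local benchmark utils module; one
# plausible shape for it (an assumption, not the actual utils source) is a
# decorator that returns the wrapped call's wall-clock time instead of its
# result, which is exactly how the results dict above consumes it:
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper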
| 179 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase_ = logging.get_logger(__name__)
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def _snake_case ( self: Tuple , a: Dict ):
if isinstance(a , a ):
__lowerCamelCase : Union[str, Any] = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self: Optional[Any] , a: int , a: Dict , a: Optional[Any] ):
if len(a ) == 0 or len(a ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(a ) )
if isinstance(a , a ):
__lowerCamelCase : int = [sequences]
__lowerCamelCase : List[str] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(a )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCamelCase )
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Optional[Any] , a: Optional[Any]=ZeroShotClassificationArgumentHandler() , *a: Optional[Any] , **a: int ):
__lowerCamelCase : Tuple = args_parser
super().__init__(*a , **a )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def _snake_case ( self: Dict ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith('entail' ):
return ind
return -1
def _snake_case ( self: Any , a: Dict , a: Tuple=True , a: List[str]=True , a: Dict=TruncationStrategy.ONLY_FIRST , **a: Tuple ):
__lowerCamelCase : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer does not support padding, which is necessary for zero-shot classification; '
                'attempting to use `pad_token=eos_token`' )
__lowerCamelCase : List[Any] = self.tokenizer.eos_token
try:
__lowerCamelCase : List[str] = self.tokenizer(
a , add_special_tokens=a , return_tensors=a , padding=a , truncation=a , )
except Exception as e:
if "too short" in str(a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__lowerCamelCase : str = self.tokenizer(
a , add_special_tokens=a , return_tensors=a , padding=a , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _snake_case ( self: Any , **a: Dict ):
if kwargs.get('multi_class' , a ) is not None:
__lowerCamelCase : List[str] = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
__lowerCamelCase : str = {}
if "candidate_labels" in kwargs:
__lowerCamelCase : Any = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
__lowerCamelCase : Dict = kwargs['hypothesis_template']
__lowerCamelCase : str = {}
if "multi_label" in kwargs:
__lowerCamelCase : int = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self: Any , a: Union[str, List[str]] , *a: str , **a: Optional[int] , ):
if len(a ) == 0:
pass
elif len(a ) == 1 and "candidate_labels" not in kwargs:
__lowerCamelCase : Any = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(a , **a )
def _snake_case ( self: int , a: Optional[int] , a: List[str]=None , a: List[str]="This example is {}." ):
__lowerCamelCase : Dict = self._args_parser(a , a , a )
for i, (candidate_label, sequence_pair) in enumerate(zip(a , a ) ):
__lowerCamelCase : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(a ) - 1,
**model_input,
}
def _snake_case ( self: Any , a: str ):
__lowerCamelCase : List[str] = inputs['candidate_label']
__lowerCamelCase : Optional[int] = inputs['sequence']
__lowerCamelCase : Tuple = {k: inputs[k] for k in self.tokenizer.model_input_names}
__lowerCamelCase : Dict = self.model(**a )
__lowerCamelCase : Optional[int] = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def _snake_case ( self: List[str] , a: Optional[int] , a: List[Any]=False ):
__lowerCamelCase : Any = [outputs['candidate_label'] for outputs in model_outputs]
__lowerCamelCase : str = [outputs['sequence'] for outputs in model_outputs]
__lowerCamelCase : List[Any] = np.concatenate([output['logits'].numpy() for output in model_outputs] )
__lowerCamelCase : List[Any] = logits.shape[0]
__lowerCamelCase : List[Any] = len(a )
__lowerCamelCase : List[str] = N // n
__lowerCamelCase : List[Any] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(a ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__lowerCamelCase : List[Any] = self.entailment_id
__lowerCamelCase : Optional[Any] = -1 if entailment_id == 0 else 0
__lowerCamelCase : Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
__lowerCamelCase : List[Any] = np.exp(a ) / np.exp(a ).sum(-1 , keepdims=a )
__lowerCamelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__lowerCamelCase : List[str] = reshaped_outputs[..., self.entailment_id]
__lowerCamelCase : Union[str, Any] = np.exp(a ) / np.exp(a ).sum(-1 , keepdims=a )
__lowerCamelCase : Optional[int] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
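
# A standalone sketch of the two scoring modes implemented in postprocess()
# above, on plain numpy logits shaped (num_labels, 2) ordered as
# [contradiction, entailment]; the stabilizing max-subtraction is my addition.
import numpy as np


def zero_shot_scores(entail_contra_logits, multi_label=False):
    if multi_label:
        # softmax over entailment vs. contradiction, independently per label
        e = np.exp(entail_contra_logits)
        return (e / e.sum(-1, keepdims=True))[:, 1]
    # single-label: softmax the entailment logits across all candidate labels
    entail = entail_contra_logits[:, 1]
    e = np.exp(entail - entail.max())
    return e / e.sum()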
| 367 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
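
# The module above only materializes its heavy submodules on first attribute
# access. A toy equivalent of that lazy-import pattern (not the transformers
# _LazyModule implementation):
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")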
| 194 | 0 |
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """simple docstring"""
    parser = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''accelerate command helpers''')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
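
# The dispatch idiom above in miniature: each sub-command registers its own
# parser and attaches a handler via set_defaults(func=...), which is what
# makes the final `args.func(args)` call route correctly. A toy registrar:
def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello", help="toy sub-command")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser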
| 14 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
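# Note (added for clarity, not in the original script): timm stores the fused
# attention projection as a single (3 * hidden_size, hidden_size) matrix with
# the query, key and value weights stacked in that order, so the slices above
# recover them as W[:H], W[H:2H] and W[-H:] for H = hidden_size.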
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights into our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding='same',
        layer_type='bottleneck',
        depths=(3, 4, 9),
        out_features=['stage3'],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={'shortest_edge': timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print('Predicted class:', logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""")
        model.push_to_hub(f"""ybelkada/{vit_name}""")
        processor.push_to_hub(f"""ybelkada/{vit_name}""")
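# Example invocation (the script filename is an assumption, the output path is hypothetical):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384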
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub) | 286 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ['image_embeds', 'negative_image_embeds', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'hint']
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
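    # Note (added for clarity): the hint is a 3-channel control image in [0, 1]
    # at the same 64x64 resolution as the requested sample, matching the
    # 'image_hint' addition_embed_type of the dummy UNet above.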
    def test_kandinsky_controlnet(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy'
        )
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png'
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='',
        ).to_tuple()
        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 50 | import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 50 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')
    # compare outputs from both models
    expected_output = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_output, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
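    # Example invocation (the script filename is an assumption, paths are hypothetical):
    #   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs --original_ckpt /path/to/ckpt.pth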
| 278 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 278 | 1 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
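# Note: for y = sigmoid(x), dy/dx = y * (1 - y), so the `deriv=True` branch
# expects the *already activated* value. For example,
# sigmoid_function(0.0) == 0.5 and sigmoid_function(0.5, True) == 0.25.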
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight in (-1, 1)
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
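# Note (added for clarity): each iteration is one gradient-descent step on a
# single weight: the error is scaled by the sigmoid slope at the current
# output, then applied in proportion to the input (INITIAL_VALUE).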
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
| 74 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.'
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}'
        )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_xpus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser('default', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        '--config_file',
        default=default_json_config_file,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
        dest='save_location',
    )
    parser.add_argument(
        '--mixed_precision',
        choices=['no', 'fp16', 'bf16'],
        type=str,
        help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.',
        default='no',
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f'accelerate configuration saved at {config_file}')
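# Example invocation (assuming the standard `accelerate` CLI entry point):
#   accelerate config default --mixed_precision fp16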
| 74 | 1 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
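# Example (the sample points lie on y = x + 5, so the value at x=5 is exact):
#   neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0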
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 | import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
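    # Minimal usage sketch (hypothetical dataclass, not part of this module):
    #
    #   @dataclasses.dataclass
    #   class TrainingArgs:
    #       learning_rate: float = 3e-4
    #       fp16: bool = False
    #
    #   parser = HfArgumentParser(TrainingArgs)
    #   (training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4"])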
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 219 | 1 |
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
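# Note: this is exponentiation by squaring, but recomputing actual_power(a, b/2)
# twice per level costs O(b) multiplications. Caching the half-power restores
# O(log b). A minimal sketch (assumption: integer exponent b >= 0):
#
#   def fast_power(a: int, b: int) -> int:
#       if b == 0:
#           return 1
#       half = fast_power(a, b // 2)
#       return half * half if b % 2 == 0 else a * half * half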
if __name__ == "__main__":
print(power(-2, -3))
| 305 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type='sine',
        backbone='resnet50',
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
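# Usage sketch (assumes the transformers ONNX export API of this era):
#   onnx_config = TableTransformerOnnxConfig(TableTransformerConfig())
#   onnx_config.inputs               # dynamic axes declared for pixel_values / pixel_mask
#   onnx_config.atol_for_validation  # tolerance used when validating the exported graph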
| 305 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Count values t <= t_limit that tile between 1 and n_limit square laminae."""
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
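# Note (added for clarity): a square lamina with outer width w and hole width h
# uses w*w - h*h tiles; w and h must share parity so the border has uniform
# thickness, and `count` maps each tile total to the number of (w, h) pairs
# that produce it.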
if __name__ == "__main__":
print(f"{solution() = }")
| 82 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
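# parse_unknown_args pairs flag/value tokens, e.g.
# ['--num_proc', '8', '--cache_dir', '/tmp'] -> {'num_proc': '8', 'cache_dir': '/tmp'}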
def main():
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown = parser.parse_known_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 34 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
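# Note (added for clarity): assigning a _LazyModule into sys.modules defers the
# heavy torch/tf/flax imports above until the corresponding attribute is first
# accessed, keeping the top-level package import cheap.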
| 288 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function='gelu',
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
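    # Usage sketch (assumes the transformers API above): attribute_map lets
    # generic code read config.hidden_size / config.num_attention_heads even
    # though Pegasus stores them as d_model / encoder_attention_heads:
    #
    #   config = PegasusConfig(d_model=512, encoder_attention_heads=8)
    #   assert config.hidden_size == 512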
| 288 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimate pi by sampling uniform points and counting those inside the unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
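# Note (added for clarity): like all Monte Carlo estimators, the error of this
# sample mean shrinks as O(1 / sqrt(iterations)), so each extra decimal digit
# of accuracy costs roughly 100x more samples.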
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('''******************''')
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print('''******************''')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print('''******************''')
    print('''Estimating pi using area_under_curve_estimator''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print('''******************''')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple =TFBlipTextModel(config=snake_case__ )
UpperCAmelCase : List[Any] =model(snake_case__ , attention_mask=snake_case__ , training=snake_case__ )
UpperCAmelCase : str =model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] =config_and_inputs
UpperCAmelCase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else ()
__lowerCamelCase : Dict = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Dict = False
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str =BlipTextModelTester(self )
UpperCAmelCase : Optional[int] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] =TFBlipTextModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__=True ) -> Any:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case__ )
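# Usage sketch (added): what the tester above exercises, as a standalone call.
# The config values mirror the tester defaults; this is an illustrative sketch,
# not part of the original test file, and it assumes TensorFlow is installed.
if __name__ == "__main__":
    import tensorflow as tf
    from transformers import BlipTextConfig, TFBlipTextModel

    config = BlipTextConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    model = TFBlipTextModel(config)
    input_ids = tf.constant([[1, 5, 7, 9]], dtype=tf.int32)
    outputs = model(input_ids, training=False)
    print(outputs.last_hidden_state.shape)  # (1, 4, 32)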
| 348 | 1 |
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    # True iff no element repeats; relies on set() dropping duplicates.
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
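# Usage sketch (added), using the reconstructed name `all_unique`: the set/len
# comparison is O(n) on average for hashable items, but raises TypeError for
# unhashable elements such as lists.
if __name__ == "__main__":
    print(all_unique([1, 2, 3, 4]))  # True
    print(all_unique([1, 2, 2, 4]))  # False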
| 359 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
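# Sketch (added): the core of the lazy-import trick used above is to replace
# the module object in sys.modules with one whose __getattr__ performs the
# real import on first attribute access. transformers' _LazyModule is more
# elaborate (it also handles extra_objects and dir()); this toy version shows
# only the idea and assumes the named submodules actually exist.
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        mod_name = self._symbol_to_module.get(attr)
        if mod_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{mod_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value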
| 119 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
def __init__( self , _A , _A=3 , _A=32 , _A=3 , _A=10 , _A=[10, 20, 30, 40] , _A=[1, 1, 2, 1] , _A=True , _A=True , _A="relu" , _A=3 , _A=None , ) -> List[str]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embeddings_size
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _UpperCamelCase ( self , _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = RegNetModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self , _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = RegNetForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _UpperCamelCase ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = RegNetModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self ) -> Tuple:
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> Dict:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _UpperCamelCase ( self ) -> str:
pass
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=_SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _UpperCamelCase ( self ) -> List[Any]:
def check_hidden_states_output(_A , _A , _A ):
SCREAMING_SNAKE_CASE_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE_ = layer_type
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _UpperCamelCase ( self ) -> str:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = RegNetModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
@cached_property
def _UpperCamelCase ( self ) -> List[str]:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
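# Condensed usage sketch (added) of the integration check above. The checkpoint
# name is an assumption ("facebook/regnet-y-040" is the usual first entry of
# REGNET_PRETRAINED_MODEL_ARCHIVE_LIST); adjust if your archive list differs.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(logits.argmax(-1).item())  # predicted ImageNet class id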
| 299 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 366 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
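# Usage sketch (added) of the behaviors pinned down by the tests above:
# Dataset.from_list takes the column set from the first record and back-fills
# missing values with None.
if __name__ == "__main__":
    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(dset.column_names)  # ['col_1'] -- columns come from the first record
    print(dset[1])            # {'col_1': None} -- missing values become None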
| 297 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__SCREAMING_SNAKE_CASE =''
__SCREAMING_SNAKE_CASE =''
__SCREAMING_SNAKE_CASE =''
__SCREAMING_SNAKE_CASE =1 # (0 is vertical, 1 is horizontal)
def lowercase__( ):
lowercase_ : Union[str, Any] = get_dataset(lowercase_ , lowercase_ )
print('Processing...' )
lowercase_ : Union[str, Any] = update_image_and_anno(lowercase_ , lowercase_ , lowercase_ )
for index, image in enumerate(lowercase_ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ : List[Any] = random_chars(32 )
lowercase_ : str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
lowercase_ : Any = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' , lowercase_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(lowercase_ )} with {file_name}''' )
lowercase_ : List[Any] = []
for anno in new_annos[index]:
lowercase_ : Any = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(lowercase_ )
with open(F'''/{file_root}.txt''' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = []
lowercase_ : Optional[int] = []
for label_file in glob.glob(os.path.join(lowercase_ , '*.txt' ) ):
lowercase_ : str = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(lowercase_ ) as in_file:
lowercase_ : Tuple = in_file.readlines()
lowercase_ : int = os.path.join(lowercase_ , F'''{label_name}.jpg''' )
lowercase_ : int = []
for obj_list in obj_lists:
lowercase_ : Optional[Any] = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(lowercase_ )
labels.append(lowercase_ )
return img_paths, labels
def lowercase__( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int = 1 ):
lowercase_ : Dict = []
lowercase_ : Any = []
lowercase_ : List[str] = []
for idx in range(len(lowercase_ ) ):
lowercase_ : List[Any] = []
lowercase_ : Dict = img_list[idx]
path_list.append(lowercase_ )
lowercase_ : Optional[Any] = anno_list[idx]
lowercase_ : int = cva.imread(lowercase_ )
if flip_type == 1:
lowercase_ : List[Any] = cva.flip(lowercase_ , lowercase_ )
for bbox in img_annos:
lowercase_ : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowercase_ : str = cva.flip(lowercase_ , lowercase_ )
for bbox in img_annos:
lowercase_ : Optional[int] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(lowercase_ )
new_imgs_list.append(lowercase_ )
return new_imgs_list, new_annos_lists, path_list
def lowercase__( __SCREAMING_SNAKE_CASE : int = 32 ):
assert number_char > 1, "The number of character should greater than 1"
lowercase_ : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(lowercase_ ) for _ in range(lowercase_ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 213 |
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
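# Example invocation (added; the paths are placeholders and the script name
# follows the usual transformers repo layout -- adjust to your checkout):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_ckpt \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json
#
# The script writes the weights (WEIGHTS_NAME) and config (CONFIG_NAME) into
# the dump folder.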
| 192 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = """van"""
def __init__( self : List[Any] , __UpperCAmelCase : Tuple=224 , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Any=[7, 3, 3, 3] , __UpperCAmelCase : str=[4, 2, 2, 2] , __UpperCAmelCase : List[Any]=[64, 128, 320, 512] , __UpperCAmelCase : Optional[Any]=[3, 3, 12, 3] , __UpperCAmelCase : List[str]=[8, 8, 4, 4] , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : List[Any]=1e-6 , __UpperCAmelCase : str=1e-2 , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Any=0.0 , **__UpperCAmelCase : Optional[Any] , ):
super().__init__(**__UpperCAmelCase)
a : List[Any] = image_size
a : Union[str, Any] = num_channels
a : List[str] = patch_sizes
a : Optional[int] = strides
a : List[str] = hidden_sizes
a : Optional[Any] = depths
a : Dict = mlp_ratios
a : Any = hidden_act
a : Optional[int] = initializer_range
a : List[Any] = layer_norm_eps
a : int = layer_scale_init_value
a : Optional[Any] = drop_path_rate
a : Dict = dropout_rate
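# Usage sketch (added): instantiating the configuration with defaults and
# overriding one field; `VanConfig` follows the reconstruction above. Note the
# relative imports mean this module runs only inside its package.
if __name__ == "__main__":
    config = VanConfig(hidden_sizes=[32, 64, 160, 256])
    print(config.model_type)    # 'van'
    print(config.hidden_sizes)  # [32, 64, 160, 256]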
| 226 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    r"""
    Constructs a MGP-STR processor which wraps an image processor and MGP-STR tokenizers into a single processor.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        # sequences is a (char_logits, bpe_logits, wp_logits) triple; decode each
        # head and keep the highest-confidence string per sample.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char-level eos token id ([s])
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe-level eos token id (#)
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece-level eos token id ([SEP])
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 226 | 1 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 , UpperCamelCase = 1 , UpperCamelCase = 1.0E4 , UpperCamelCase = False , UpperCamelCase = 1.0 , ):
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"Embedding dimension {embedding_dim} should be even"
A = float(embedding_dim // 2 )
A = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
A = min_timescale * jnp.exp(jnp.arange(UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment )
A = jnp.expand_dims(UpperCamelCase , 1 ) * jnp.expand_dims(UpperCamelCase , 0 )
# scale embeddings
A = scale * emb
if flip_sin_to_cos:
A = jnp.concatenate([jnp.cos(UpperCamelCase ), jnp.sin(UpperCamelCase )] , axis=1 )
else:
A = jnp.concatenate([jnp.sin(UpperCamelCase ), jnp.cos(UpperCamelCase )] , axis=1 )
A = jnp.reshape(UpperCamelCase , [jnp.shape(UpperCamelCase )[0], embedding_dim] )
return signal
class _UpperCAmelCase ( nn.Module ):
UpperCamelCase = 3_2
UpperCamelCase = jnp.floataa
@nn.compact
def __call__( self :Any , __UpperCamelCase :Any ):
A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(__UpperCamelCase )
A = nn.silu(__UpperCamelCase )
A = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(__UpperCamelCase )
return temb
class _UpperCAmelCase ( nn.Module ):
UpperCamelCase = 3_2
UpperCamelCase = False
UpperCamelCase = 1
@nn.compact
def __call__( self :List[Any] , __UpperCamelCase :Union[str, Any] ):
return get_sinusoidal_embeddings(
__UpperCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
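# Quick shape check (added) for the helper above: a batch of 4 timesteps
# embedded into 32 dims yields a (4, 32) array, half sine and half cosine
# features (order controlled by flip_sin_to_cos).
if __name__ == "__main__":
    t = jnp.array([0.0, 1.0, 2.0, 3.0], dtype=jnp.float32)
    emb = get_sinusoidal_embeddings(t, embedding_dim=32)
    print(emb.shape)  # (4, 32)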
| 292 |
"""simple docstring"""
from math import isqrt, loga
def A__ ( UpperCamelCase ):
A = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase , UpperCamelCase ):
A = False
return [i for i in range(2 , UpperCamelCase ) if is_prime[i]]
def A__ ( UpperCamelCase = 800_800 , UpperCamelCase = 800_800 ):
A = degree * loga(UpperCamelCase )
A = int(UpperCamelCase )
A = calculate_prime_numbers(UpperCamelCase )
A = 0
A = 0
A = len(UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 292 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
def __init__( self : Tuple ,_snake_case : Dict ,_snake_case : Union[str, Any]=3 ,_snake_case : List[Any]=7 ,_snake_case : Dict=True ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=False ,_snake_case : Optional[int]=True ,_snake_case : Any=99 ,_snake_case : Any=32 ,_snake_case : str=5 ,_snake_case : Union[str, Any]=4 ,_snake_case : List[str]=37 ,_snake_case : Optional[Any]="gelu" ,_snake_case : Optional[Any]=0.1 ,_snake_case : List[str]=0.1 ,_snake_case : Dict=512 ,_snake_case : List[str]=16 ,_snake_case : List[Any]=2 ,_snake_case : Dict=0.02 ,_snake_case : Any=3 ,_snake_case : int=4 ,_snake_case : Any=None ,) -> Tuple:
"""simple docstring"""
lowercase__ : str = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Any = seq_length
lowercase__ : int = is_training
lowercase__ : List[Any] = use_input_mask
lowercase__ : Union[str, Any] = use_token_type_ids
lowercase__ : List[Any] = use_labels
lowercase__ : List[str] = vocab_size
lowercase__ : str = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[int] = intermediate_size
lowercase__ : Any = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Optional[int] = num_choices
lowercase__ : Union[str, Any] = scope
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Dict = None
if self.use_input_mask:
lowercase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Any = None
lowercase__ : int = None
lowercase__ : Dict = None
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase__ : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,pad_token_id=1 ,new_decoder_architecture=_snake_case ,)
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : str ,_snake_case : Tuple ,_snake_case : str ,_snake_case : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any] ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = FalconModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case )
lowercase__ : Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : Optional[int] ,_snake_case : int ,_snake_case : Optional[int] ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : str ,_snake_case : str ,_snake_case : int ,_snake_case : Dict ,_snake_case : Tuple ,) -> Dict:
"""simple docstring"""
lowercase__ : Any = True
lowercase__ : int = FalconModel(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[int] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,)
lowercase__ : List[str] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,)
lowercase__ : Union[str, Any] = model(_snake_case ,attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self : int ,_snake_case : Any ,_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : List[str] ,_snake_case : str ,_snake_case : Union[str, Any] ,_snake_case : Tuple ,_snake_case : Union[str, Any] ,_snake_case : str ,) -> List[Any]:
"""simple docstring"""
lowercase__ : Dict = FalconForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : List[str] ,_snake_case : List[Any] ,_snake_case : Optional[int] ,_snake_case : str ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : Any ,_snake_case : Optional[int] ,) -> int:
"""simple docstring"""
lowercase__ : Tuple = True
lowercase__ : Any = True
lowercase__ : Dict = FalconForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowercase__ : Union[str, Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,use_cache=_snake_case ,)
lowercase__ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
lowercase__ : str = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
lowercase__ : Any = torch.cat([input_ids, next_tokens] ,dim=-1 )
lowercase__ : Union[str, Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
lowercase__ : Optional[int] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
lowercase__ : Union[str, Any] = model(
_snake_case ,attention_mask=_snake_case ,encoder_hidden_states=_snake_case ,encoder_attention_mask=_snake_case ,past_key_values=_snake_case ,output_hidden_states=_snake_case ,)['''hidden_states'''][0]
# select random slice
lowercase__ : List[Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
lowercase__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1e-3 ) )
def UpperCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : int = config_and_inputs
lowercase__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = FalconModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , *lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowercase__ : Optional[int] = alibi
self.model_tester.create_and_check_model(_snake_case ,*_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = 3
lowercase__ : Optional[Any] = input_dict['''input_ids''']
lowercase__ : Optional[int] = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Any = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Optional[int] = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Optional[Any] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = 3
lowercase__ : Any = '''single_label_classification'''
lowercase__ : Any = input_dict['''input_ids''']
lowercase__ : str = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
lowercase__ : Any = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = input_dict['''input_ids''']
lowercase__ : List[Any] = FalconForCausalLM(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Dict = model(_snake_case ,use_cache=_snake_case )
lowercase__ : List[str] = input_ids.shape[0]
lowercase__ : Optional[Any] = model._convert_to_rw_cache(result.past_key_values )
lowercase__ : Dict = model._convert_cache_to_standard_format(_snake_case ,_snake_case )
for layer in range(len(_snake_case ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = 3
lowercase__ : Tuple = '''multi_label_classification'''
lowercase__ : Optional[Any] = input_dict['''input_ids''']
lowercase__ : Union[str, Any] = input_ids.ne(1 ).to(_snake_case )
lowercase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : Optional[Any] = FalconForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Any = model(_snake_case ,attention_mask=_snake_case ,labels=_snake_case )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_snake_case ,'''use_cache''' ):
return
lowercase__ : str = model_class(_snake_case ).to(_snake_case )
if "use_cache" not in inputs:
lowercase__ : int = True
lowercase__ : Optional[int] = model(**_snake_case )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowercase__ : Optional[int] = (
getattr(_snake_case ,'''decoder_layers''' ,_snake_case )
or getattr(_snake_case ,'''num_decoder_layers''' ,_snake_case )
or config.num_hidden_layers
)
lowercase__ : List[str] = getattr(_snake_case ,'''num_kv_heads''' ,config.num_attention_heads )
lowercase__ : List[Any] = getattr(_snake_case ,'''d_model''' ,config.hidden_size )
lowercase__ : Dict = embed_dim // num_attention_heads
lowercase__ : Tuple = outputs['''past_key_values''']
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ : Dict = inputs['''input_ids'''].shape
for i in range(_snake_case ):
if config.new_decoder_architecture:
lowercase__ : int = config.num_attention_heads
elif config.multi_query:
lowercase__ : int = 1
self.assertEqual(len(past_kv[0] ) ,2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
def UpperCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
lowercase__ : List[Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(_snake_case )
lowercase__ : List[str] = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
lowercase__ : Any = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
lowercase__ : List[Any] = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=19 )
lowercase__ : Union[str, Any] = tokenizer.batch_decode(_snake_case )[0]
self.assertEqual(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = FalconForCausalLM.from_pretrained(_snake_case )
model.eval()
model.to(_snake_case )
lowercase__ : Optional[Any] = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=4 )
model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=4 )
model.generate(**_snake_case ,num_beams=2 ,max_new_tokens=4 )
@slow
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : List[Any] = FalconForCausalLM.from_pretrained(_snake_case )
model.eval()
model.to(device=_snake_case )
lowercase__ : List[Any] = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(_snake_case )
# Test results are the same with and without cache
lowercase__ : Optional[Any] = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=20 ,use_cache=_snake_case )
lowercase__ : str = model.generate(**_snake_case ,do_sample=_snake_case ,max_new_tokens=20 ,use_cache=_snake_case )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
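# Minimal generation sketch (added) mirroring the integration tests above; the
# tiny random checkpoint produces garbage text but exercises the full pipeline.
if __name__ == "__main__":
    from transformers import AutoTokenizer, FalconForCausalLM

    repo = "Rocketknight1/tiny-random-falcon-7b"
    tok = AutoTokenizer.from_pretrained(repo)
    model = FalconForCausalLM.from_pretrained(repo)
    inputs = tok("My favorite food is", return_tensors="pt")
    out = model.generate(**inputs, do_sample=False, max_new_tokens=4)
    print(tok.batch_decode(out)[0])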
| 302 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' )
lowercase__ : Tuple = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ : Optional[int] = model.generate(**_snake_case )
lowercase__ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case )
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ : int = model_reloaded.generate(**_snake_case )
self.assertTrue(torch.allclose(_snake_case ,_snake_case ) )
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5'''
lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case )
lowercase__ : Union[str, Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_snake_case ):
model.save_pretrained(_snake_case )
lowercase__ : int = model.reverse_bettertransformer()
model.save_pretrained(_snake_case )
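# Usage sketch (added) of the round trip exercised above: to_bettertransformer()
# swaps in optimum's fused attention modules, and reverse_bettertransformer()
# must be called before save_pretrained() (otherwise a ValueError is raised).
# The output directory is a placeholder.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to_bettertransformer()
    # ... run inference here ...
    model = model.reverse_bettertransformer()
    model.save_pretrained("./t5-tiny-roundtrip")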
| 302 | 1 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :str = TaConfig.from_json_file(_lowercase )
print(f"""Building PyTorch model from configuration: {config}""" )
snake_case_ :Optional[Any] = TaForConditionalGeneration(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_lowercase, _lowercase, _lowercase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__a = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 66 |
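# A minimal sketch of driving the converter above from Python instead of the CLI. The
# function is defined above under an obfuscated name; the __main__ block calls it as
# convert_tf_checkpoint_to_pytorch. All three paths are placeholders, not real files.
convert_tf_checkpoint_to_pytorch(
    "/path/to/t5/model.ckpt",    # placeholder TF checkpoint prefix
    "/path/to/t5_config.json",   # placeholder T5 config file
    "/path/to/pytorch_dump",     # placeholder output directory
)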
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 0 |
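# A de-mangled, runnable rendering of the cocktail shaker sort above (as printed, the
# obfuscated variable names break the function; the __main__ block already calls it by
# this name), plus quick sanity checks:
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # backward pass: sink small values to the left
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i):  # forward pass: bubble large values to the right
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:  # no swap in either direction: already sorted
            break
    return unsorted

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 0, 5, 1]) == [-4, 0, 1, 5]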
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
snake_case_ =None
snake_case_ =False
snake_case_ =False
snake_case_ =False
snake_case_ =None
snake_case_ =None
snake_case_ =False
snake_case_ =False
snake_case_ =False
snake_case_ =True
snake_case_ =None
snake_case_ =1
snake_case_ =None
snake_case_ =False
snake_case_ =None
snake_case_ =None
def lowerCAmelCase__ (self ) -> "DownloadConfig":
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(__lowerCamelCase ) for k, v in self.__dict__.items()} )
| 94 |
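# The dataclass above is datasets.DownloadConfig with its field names obfuscated; the
# single method deep-copies every field into a fresh instance. A minimal usage sketch
# against the real class (field names assumed from the datasets library):
from datasets import DownloadConfig

base = DownloadConfig(max_retries=5, use_etag=False)
variant = base.copy()      # independent deep copy
variant.max_retries = 1    # mutating the copy leaves the original untouched
assert base.max_retries == 5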
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
lowerCAmelCase__ : Any = set()
# Replace all the whitespace in our sentence
lowerCAmelCase__ : List[Any] = input_str.replace(''' ''' ,'''''')
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(lowerCamelCase_) == 26
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [False] * 26
for char in input_str:
if char.islower():
lowerCAmelCase__ : Union[str, Any] = True
elif char.isupper():
lowerCAmelCase__ : str = True
return all(lowerCamelCase_)
def lowerCAmelCase__ ( lowerCamelCase_ : str = "The quick brown fox jumps over the lazy dog" ,):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def lowerCAmelCase__ ( ):
'''simple docstring'''
from timeit import timeit
lowerCAmelCase__ : Optional[Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' ,setup=lowerCamelCase_))
print(timeit('''is_pangram_faster()''' ,setup=lowerCamelCase_))
print(timeit('''is_pangram_fastest()''' ,setup=lowerCamelCase_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 94 | 1 |
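# As printed, the three pangram predicates above all collide on one obfuscated name;
# their intended names (is_pangram, is_pangram_faster, is_pangram_fastest) appear in
# the benchmark setup string. A de-mangled rendering of the fastest variant:
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    # a pangram uses every letter of the alphabet at least once
    return len({char for char in input_str.lower() if char.isalpha()}) == 26

assert is_pangram_fastest()
assert is_pangram_fastest("Waltz, bad nymph, for quick jigs vex.")
assert not is_pangram_fastest("My name is Unknown")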
import comet # From: unbabel-comet
import torch
import datasets
__lowerCAmelCase : Union[str, Any] = datasets.logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
__lowerCAmelCase : Any = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
__lowerCAmelCase : List[str] = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self : Any ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def _lowercase ( self : int , UpperCamelCase__ : Optional[int] ) -> Dict:
"""simple docstring"""
if self.config_name == "default":
__magic_name__ = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
__magic_name__ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=False ) -> int:
"""simple docstring"""
if gpus is None:
__magic_name__ = 1 if torch.cuda.is_available() else 0
__magic_name__ = {"""src""": sources, """mt""": predictions, """ref""": references}
__magic_name__ = [dict(zip(UpperCamelCase__ , UpperCamelCase__ ) ) for t in zip(*data.values() )]
__magic_name__ , __magic_name__ = self.scorer.predict(UpperCamelCase__ , gpus=UpperCamelCase__ , progress_bar=UpperCamelCase__ )
return {"mean_score": mean_score, "scores": scores}
| 88 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ , __magic_name__ = emb.weight.shape
__magic_name__ = nn.Linear(A_, A_, bias=A_ )
__magic_name__ = emb.weight.data
return lin_layer
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = torch.load(A_, map_location="""cpu""" )
__magic_name__ = Namespace(**checkpoint["""cfg"""]["""model"""] )
__magic_name__ = checkpoint["""model"""]
remove_ignore_keys_(A_ )
__magic_name__ = state_dict["""decoder.embed_tokens.weight"""].shape[0]
__magic_name__ = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
__magic_name__ = XGLMConfig(
vocab_size=A_, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
__magic_name__ = XGLMForCausalLM(A_ )
__magic_name__ = model.load_state_dict(A_, strict=A_ )
print(A_ )
__magic_name__ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : str = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 88 | 1 |
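# A minimal sketch of driving the XGLM converter above from Python; both paths are
# placeholders, and the function name follows the __main__ block rather than the
# obfuscated definition.
model = convert_fairseq_xglm_checkpoint_from_disk("/path/to/fairseq/model.pt")
model.save_pretrained("/path/to/xglm_hf_dump")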
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
SCREAMING_SNAKE_CASE : int = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
SCREAMING_SNAKE_CASE : List[str] = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
SCREAMING_SNAKE_CASE : Any = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def __UpperCAmelCase ( snake_case_ : Any ) -> str:
"""simple docstring"""
_lowerCAmelCase = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , snake_case_ )
return [m.group(0 ) for m in matches]
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_lowerCAmelCase = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_lowerCAmelCase = collections.defaultdict(snake_case_ )
_lowerCAmelCase = collections.defaultdict(snake_case_ )
_lowerCAmelCase = collections.defaultdict(snake_case_ )
    # Let's look through all transformers objects (once) and find whether each model is supported by a given backend.
for attr_name in dir(snake_case_ ):
_lowerCAmelCase = None
if _re_tf_models.match(snake_case_ ) is not None:
_lowerCAmelCase = tf_models
_lowerCAmelCase = _re_tf_models.match(snake_case_ ).groups()[0]
elif _re_flax_models.match(snake_case_ ) is not None:
_lowerCAmelCase = flax_models
_lowerCAmelCase = _re_flax_models.match(snake_case_ ).groups()[0]
elif _re_pt_models.match(snake_case_ ) is not None:
_lowerCAmelCase = pt_models
_lowerCAmelCase = _re_pt_models.match(snake_case_ ).groups()[0]
if lookup_dict is not None:
while len(snake_case_ ) > 0:
if attr_name in model_prefix_to_model_type:
_lowerCAmelCase = True
break
# Try again after removing the last word in the name
_lowerCAmelCase = """""".join(camel_case_split(snake_case_ )[:-1] )
_lowerCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_lowerCAmelCase = list(snake_case_ )
all_models.sort()
_lowerCAmelCase = {"""model_type""": all_models}
_lowerCAmelCase = [pt_models[t] for t in all_models]
_lowerCAmelCase = [tf_models[t] for t in all_models]
_lowerCAmelCase = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to pick the right preprocessing class for each model type.
_lowerCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_lowerCAmelCase = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_lowerCAmelCase = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_lowerCAmelCase = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_lowerCAmelCase = """AutoTokenizer"""
_lowerCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_lowerCAmelCase = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
_lowerCAmelCase = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(snake_case_ , snake_case_ , snake_case_ ):
# The type of pipeline may not exist in this framework
if not hasattr(snake_case_ , snake_case_ ):
continue
# First extract all model_names
_lowerCAmelCase = []
for name in getattr(snake_case_ , snake_case_ ).values():
if isinstance(snake_case_ , snake_case_ ):
model_names.append(snake_case_ )
else:
model_names.extend(list(snake_case_ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = get_frameworks_table()
_lowerCAmelCase = Dataset.from_pandas(snake_case_ )
_lowerCAmelCase = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=snake_case_ )
_lowerCAmelCase = Dataset.from_json(snake_case_ )
_lowerCAmelCase = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(snake_case_ ) )
}
_lowerCAmelCase = update_pipeline_and_auto_class_table(snake_case_ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_lowerCAmelCase = sorted(table.keys() )
_lowerCAmelCase = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
_lowerCAmelCase = Dataset.from_pandas(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(snake_case_ , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(snake_case_ , """pipeline_tags.json""" ) )
if commit_sha is not None:
_lowerCAmelCase = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_lowerCAmelCase = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=snake_case_ , repo_type="""dataset""" , token=snake_case_ , commit_message=snake_case_ , )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_lowerCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
_lowerCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
_lowerCAmelCase = pipeline_tasks[key]["""pt"""]
if isinstance(snake_case_ , (list, tuple) ):
_lowerCAmelCase = model[0]
_lowerCAmelCase = model.__name__
if model not in in_table.values():
missing.append(snake_case_ )
if len(snake_case_ ) > 0:
_lowerCAmelCase = """, """.join(snake_case_ )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
        update_metadata(args.token, args.commit_sha)
| 317 |
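# The first helper above (intended name camel_case_split, per the regex it wraps) cuts
# an identifier on CamelCase boundaries; the script uses it to strip trailing words from
# a class name until a known model prefix remains. A self-contained rendering:
import re

def camel_case_split(identifier: str) -> list:
    matches = re.finditer(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

assert camel_case_split("GPTNeoForCausalLM") == ["GPT", "Neo", "For", "Causal", "LM"]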
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __UpperCAmelCase ( snake_case_ : bool , snake_case_ : bool ) -> Tuple:
"""simple docstring"""
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : Optional[int] , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : Dict , **snake_case_ : Union[str, Any] ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
"""simple docstring"""
_lowerCAmelCase = random.Random()
_lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = "TensorFlow"
@property
def A__ (self ):
'''simple docstring'''
return tf.__version__
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_speed(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_inference_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_inference )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase )
_lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
_lowerCAmelCase = self._prepare_train_func(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self._measure_memory(_train )
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase , decoder_input_ids=lowerCamelCase , training=lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase , training=lowerCamelCase )
_lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
_lowerCAmelCase = (
hasattr(lowerCamelCase , """architectures""" )
and isinstance(config.architectures , lowerCamelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase = __import__("""transformers""" , fromlist=[model_class] )
_lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = model_cls(lowerCamelCase )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
_lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase = config.vocab_size if hasattr(lowerCamelCase , """vocab_size""" ) else config.encoder.vocab_size
_lowerCAmelCase = random_input_ids(lowerCamelCase , lowerCamelCase , lowerCamelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase = model(lowerCamelCase , decoder_input_ids=lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )[0]
_lowerCAmelCase = tf.gradients(lowerCamelCase , model.trainable_variables )
return gradients
_lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ (self , lowerCamelCase ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times first to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(lowerCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase = timeit.repeat(
lowerCamelCase , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
_lowerCAmelCase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
_lowerCAmelCase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase )
_lowerCAmelCase = meminfo.used
_lowerCAmelCase = Memory(lowerCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
_lowerCAmelCase = None
else:
_lowerCAmelCase = measure_peak_memory_cpu(lowerCamelCase )
_lowerCAmelCase = Memory(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase = stop_memory_tracing(lowerCamelCase )
if memory is None:
_lowerCAmelCase = summary.total
else:
_lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None | 317 | 1 |
"""simple docstring"""
import heapq
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = []
    # for each node and its adjacency list, add them and the node's rank to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is used to get max-rank ordering
for key, value in graph.items():
# O(log(n))
heapq.heappush(UpperCamelCase__, [-1 * len(UpperCamelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCamelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCamelCase = heapq.heappop(UpperCamelCase__ )[1][0]
chosen_vertices.add(UpperCamelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_UpperCamelCase = elem[1][1].index(UpperCamelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(UpperCamelCase__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 194 |
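# Another call to the greedy max-degree heuristic above (intended name
# greedy_min_vertex_cover, per the __main__ block; the printed body is obfuscated, so
# this assumes the de-mangled function). On a path graph 0-1-2-3 the minimum vertex
# cover is {1, 2}, which the heuristic also finds:
path_graph = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}
print(greedy_min_vertex_cover(path_graph))  # expected: {1, 2}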
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =IFImgaImgSuperResolutionPipeline
a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
a__ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
return self._get_superresolution_dummy_components()
def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Any = torch.manual_seed(A )
else:
_UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A )
_UpperCAmelCase : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 | 0 |
"""simple docstring"""
from __future__ import annotations
_UpperCamelCase: Any = list[list[int]]
# assigning initial values to the grid
_UpperCamelCase: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
_UpperCamelCase: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowercase__ ( _UpperCAmelCase ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowercase__ ( _UpperCAmelCase ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_UpperCAmelCase ):
lowercase , lowercase : List[Any] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase : Union[str, Any] = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
lowercase : Tuple = 0
return None
def lowercase__ ( _UpperCAmelCase ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_UpperCAmelCase , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 2_0)
print_solution(example_grid)
print('\nExample grid solution:')
_UpperCamelCase: Optional[int] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 53 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_UpperCamelCase: Optional[int] = logging.get_logger(__name__)
_UpperCamelCase: Union[str, Any] = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 'gpt_neo'
_lowerCamelCase = ['past_key_values']
_lowerCamelCase = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Optional[Any], lowerCAmelCase : int=50257, lowerCAmelCase : Tuple=2048, lowerCAmelCase : int=2048, lowerCAmelCase : Tuple=24, lowerCAmelCase : Optional[Any]=[[["global", "local"], 12]], lowerCAmelCase : Optional[int]=16, lowerCAmelCase : Optional[Any]=None, lowerCAmelCase : Dict=256, lowerCAmelCase : Optional[int]="gelu_new", lowerCAmelCase : Any=0.0, lowerCAmelCase : Dict=0.0, lowerCAmelCase : Optional[Any]=0.0, lowerCAmelCase : Dict=0.1, lowerCAmelCase : List[Any]=1e-5, lowerCAmelCase : Optional[Any]=0.02, lowerCAmelCase : Dict=True, lowerCAmelCase : int=50256, lowerCAmelCase : Optional[Any]=50256, **lowerCAmelCase : Any, ) -> Optional[Any]:
lowercase : List[Any] = vocab_size
lowercase : Optional[Any] = max_position_embeddings
lowercase : Dict = hidden_size
lowercase : Optional[Any] = num_layers
lowercase : str = num_heads
lowercase : Optional[int] = intermediate_size
lowercase : List[str] = window_size
lowercase : Dict = activation_function
lowercase : Dict = resid_dropout
lowercase : int = embed_dropout
lowercase : Optional[Any] = attention_dropout
lowercase : Tuple = classifier_dropout
lowercase : Optional[int] = layer_norm_epsilon
lowercase : Dict = initializer_range
lowercase : Optional[Any] = use_cache
lowercase : Union[str, Any] = bos_token_id
lowercase : int = eos_token_id
lowercase : str = attention_types
lowercase : int = self.expand_attention_types_params(lowerCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
@staticmethod
def lowercase ( lowerCAmelCase : str ) -> Optional[Any]:
lowercase : Dict = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
import torch
lowercase : Dict = input.size()
lowercase : Optional[int] = len(_UpperCAmelCase )
lowercase : str = shape[dimension]
lowercase : Optional[Any] = torch.arange(0 , _UpperCAmelCase , _UpperCAmelCase )
lowercase : List[str] = torch.div(sizedim - size , _UpperCAmelCase , rounding_mode='floor' ) + 1
lowercase : Any = torch.arange(_UpperCAmelCase ) + low_indices[:min_length][:, None]
lowercase : List[Any] = [slice(_UpperCAmelCase )] * rank
lowercase : int = indices
lowercase : Optional[Any] = input[s]
lowercase : str = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(_UpperCAmelCase )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Any:
'''simple docstring'''
import torch
lowercase : int = torch.arange(1 , _UpperCAmelCase )
lowercase : List[str] = torch.remainder(_UpperCAmelCase , _UpperCAmelCase )
lowercase : Optional[int] = remainders == 0
lowercase : Tuple = candidates[divisor_indices]
lowercase : Any = torch.max(_UpperCAmelCase )
return largest_divisor, torch.div(_UpperCAmelCase , _UpperCAmelCase , rounding_mode='floor' )
class a__ ( SCREAMING_SNAKE_CASE__ ):
@property
def lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]:
lowercase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase, direction='inputs' )
lowercase : Dict = {0: 'batch', 1: 'past_sequence + sequence'}
else:
lowercase : List[str] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowercase ( self : int ) -> int:
return self._config.num_heads
def lowercase ( self : Tuple, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int = -1, lowerCAmelCase : int = -1, lowerCAmelCase : bool = False, lowerCAmelCase : Optional[TensorType] = None, ) -> Mapping[str, Any]:
lowercase : Union[str, Any] = super(lowerCAmelCase, self ).generate_dummy_inputs(
lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase )
        # We need to order the inputs in the way they appear in the forward()
lowercase : int = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowercase , lowercase : str = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowercase : Tuple = seqlen + 2
lowercase : Tuple = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase : Any = [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
lowercase : Optional[int] = common_inputs['attention_mask']
if self.use_past:
lowercase : Optional[int] = ordered_inputs['attention_mask'].dtype
lowercase : Dict = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase, lowerCAmelCase, dtype=lowerCAmelCase )], dim=1 )
return ordered_inputs
@property
def lowercase ( self : int ) -> int:
return 13
| 53 | 1 |
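# The two module-level helpers above are ONNX-friendly stand-ins for Tensor.unfold and
# the block-length computation used by GPT-Neo's local attention (intended names
# custom_unfold and custom_get_block_length_and_num_blocks, which their bodies mirror).
# The windowing that the unfold helper reproduces, shown with the built-in op:
import torch

x = torch.arange(10).reshape(1, 10)
windows = x.unfold(1, 4, 2)  # dimension=1, size=4, step=2 -> shape (1, 4, 4)
# windows[0] is:
# [[0, 1, 2, 3],
#  [2, 3, 4, 5],
#  [4, 5, 6, 7],
#  [6, 7, 8, 9]]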
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : str = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """nllb-moe"""
_SCREAMING_SNAKE_CASE = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2_8_1_1_2 , SCREAMING_SNAKE_CASE_ : str=1_0_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE_ : Tuple=1_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : List[str]=1_6 , SCREAMING_SNAKE_CASE_ : int=0.05 , SCREAMING_SNAKE_CASE_ : Any=0.05 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[int]="relu" , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0_2_4 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : List[Any]="float32" , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_2_8 , SCREAMING_SNAKE_CASE_ : List[Any]=6_4 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : int=4 , SCREAMING_SNAKE_CASE_ : Dict=0.0_01 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0_01 , SCREAMING_SNAKE_CASE_ : List[Any]="all" , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : List[str]=1.0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.2 , SCREAMING_SNAKE_CASE_ : List[Any]=1 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : Tuple=False , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
lowerCAmelCase_ : List[str] = vocab_size
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : Dict = d_model
lowerCAmelCase_ : Optional[int] = encoder_ffn_dim
lowerCAmelCase_ : Optional[int] = encoder_layers
lowerCAmelCase_ : Union[str, Any] = encoder_attention_heads
lowerCAmelCase_ : Tuple = decoder_ffn_dim
lowerCAmelCase_ : List[str] = decoder_layers
lowerCAmelCase_ : List[str] = decoder_attention_heads
lowerCAmelCase_ : List[str] = dropout
lowerCAmelCase_ : Optional[Any] = attention_dropout
lowerCAmelCase_ : Union[str, Any] = activation_dropout
lowerCAmelCase_ : List[Any] = activation_function
lowerCAmelCase_ : List[str] = init_std
lowerCAmelCase_ : Optional[int] = encoder_layerdrop
lowerCAmelCase_ : Optional[int] = decoder_layerdrop
lowerCAmelCase_ : Any = use_cache
lowerCAmelCase_ : Optional[Any] = encoder_layers
lowerCAmelCase_ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : Optional[Any] = router_z_loss_coef
lowerCAmelCase_ : Union[str, Any] = router_aux_loss_coef
lowerCAmelCase_ : Dict = decoder_sparse_step
lowerCAmelCase_ : Dict = encoder_sparse_step
lowerCAmelCase_ : List[str] = num_experts
lowerCAmelCase_ : str = expert_capacity
lowerCAmelCase_ : Optional[int] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
lowerCAmelCase_ : List[str] = router_dtype
lowerCAmelCase_ : List[Any] = router_ignore_padding_tokens
lowerCAmelCase_ : Optional[int] = batch_prioritized_routing
lowerCAmelCase_ : List[Any] = second_expert_policy
lowerCAmelCase_ : Union[str, Any] = normalize_router_prob_before_dropping
lowerCAmelCase_ : Optional[int] = moe_eval_capacity_token_fraction
lowerCAmelCase_ : Optional[Any] = moe_token_dropout
lowerCAmelCase_ : Any = output_router_logits
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
| 224 |
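# A minimal sketch of the router_dtype validation performed by the constructor above,
# run against the real transformers class:
from transformers import NllbMoeConfig

config = NllbMoeConfig(router_dtype="bfloat16")  # accepted
assert config.router_dtype == "bfloat16"
try:
    NllbMoeConfig(router_dtype="int8")           # anything else raises
except ValueError as err:
    print(err)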
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """dpr"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : Any=7_6_8 , SCREAMING_SNAKE_CASE_ : Dict=1_2 , SCREAMING_SNAKE_CASE_ : List[str]=1_2 , SCREAMING_SNAKE_CASE_ : int=3_0_7_2 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu" , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=1E-12 , SCREAMING_SNAKE_CASE_ : List[Any]=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="absolute" , SCREAMING_SNAKE_CASE_ : int = 0 , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : str = num_attention_heads
lowerCAmelCase_ : Any = hidden_act
lowerCAmelCase_ : List[str] = intermediate_size
lowerCAmelCase_ : int = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Tuple = max_position_embeddings
lowerCAmelCase_ : List[str] = type_vocab_size
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : List[str] = projection_dim
lowerCAmelCase_ : List[str] = position_embedding_type
| 224 | 1 |
'''simple docstring'''
def lowercase__ ( __UpperCamelCase = 1000 )-> int:
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 183 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = GPTSwaTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Union[str, Any] = GPTSwaTokenizer(__a, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = "This is a test"
_lowerCAmelCase : List[Any] = "This is a test"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = "<s>"
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a), __a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "j")
self.assertEqual(len(__a), 2000)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 2000)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = GPTSwaTokenizer(__a)
_lowerCAmelCase : Optional[int] = tokenizer.tokenize("This is a test")
self.assertListEqual(__a, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), [465, 287, 265, 631, 842])
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
# fmt: off
self.assertListEqual(
__a, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
# fmt: on
_lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(__a)
self.assertListEqual(
__a, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
_lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(__a)
# fmt: off
self.assertListEqual(
__a, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."])
# fmt: on
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = GPTSwaTokenizer(__a)
_lowerCAmelCase : Optional[int] = ["This is a test", "I was born in 92000, and this is falsé."]
_lowerCAmelCase : Optional[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__a, __a):
self.assertListEqual(tokenizer.encode_fast(__a), __a)
# Test that decode_fast returns the input text
for text, token_ids in zip(__a, __a):
self.assertEqual(tokenizer.decode_fast(__a), __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
_lowerCAmelCase : Union[str, Any] = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a, model_name="AI-Sweden/gpt-sw3-126m", sequences=__a, )
| 36 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch):
    # ordinary least squares via the normal equation: beta = (X^T X)^-1 X^T y
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user, train_match, test_match):
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train, x_test, train_user):
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user):
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote, actual_result):
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 178 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extracts some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError('args.model_type should be \"bert\".')
    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"{prefix}.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"{prefix}.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.query.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.key.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.value.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.intermediate.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["cls.predictions.decoder.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["cls.predictions.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"cls.predictions.transform.dense.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"cls.predictions.transform.LayerNorm.{w}"] = state_dict[
                f"cls.predictions.transform.LayerNorm.{w}"
            ]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
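

# Illustrative sketch (added helper with made-up tensor values, not part of the
# original script): the heart of the extraction above is renumbering a chosen
# subset of teacher layers into consecutive student layers, keyed by std_idx
# instead of teacher_idx. The same mapping on a toy state dict:
def _demo_layer_selection() -> dict:
    fake_sd = {f"bert.encoder.layer.{i}.attention.self.query.weight": f"tensor_{i}" for i in range(12)}
    student_sd = {}
    for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
        student_sd[f"bert.encoder.layer.{std_idx}.attention.self.query.weight"] = fake_sd[
            f"bert.encoder.layer.{teacher_idx}.attention.self.query.weight"
        ]
    return student_sd  # 6 entries, layers renumbered 0..5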
| 371 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
lowercase : Tuple = parser.parse_args()
lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
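
# Illustrative invocation (script and file names are assumptions, not from this
# snippet; the flags match the argparse definitions above):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim \
#       --extract_ema \
#       --half \
#       --dump_path ./stable-diffusion-converted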
| 311 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
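

# Illustrative sketch (added helper, not in the original test): the token layout
# asserted in test_sequence_builders above, spelled out without a tokenizer. Per
# those asserts, XLM's opening special token has id 0 and its separator id 1, so
# a pair is encoded as <s> A </s> B </s>.
def _xlm_special_tokens_layout(text, text_2=None):
    if text_2 is None:
        return [0] + text + [1]
    return [0] + text + [1] + text_2 + [1]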
| 16 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
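

# Illustrative sketch (hypothetical class name, not from transformers): the
# general deprecation pattern used above -- keep the old name importable as a
# thin subclass of its replacement and emit a FutureWarning on construction.
class _OldNameExample(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "_OldNameExample is deprecated; use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)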
| 16 | 1 |
'''simple docstring'''
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 219 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 219 | 1 |
def hamming(n_element: int) -> list:
    """simple docstring"""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 51 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowercase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
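
# Illustrative usage (kept as comments; the checkpoint id is an assumption --
# any unconditional audio diffusion checkpoint served by this pipeline class
# would work the same way):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
#   waveform = output.audios[0]  # numpy array, shape (channels, samples)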
| 99 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
__lowerCAmelCase = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
__lowerCAmelCase = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def bytes_to_unicode():
    # maps every possible byte to a printable unicode character, so BPE never
    # operates on raw whitespace/control bytes
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    # return the set of adjacent symbol pairs in a word (a tuple of symbols)
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __a ( __UpperCamelCase ):
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Tuple = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
lowercase__: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
lowercase__: Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
lowercase__: List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
lowercase__: List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
lowercase__: Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
lowercase__: List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding='utf-8' ) as vocab_handle:
lowercase__: List[str] = json.load(lowerCAmelCase__ )
lowercase__: int = {v: k for k, v in self.encoder.items()}
lowercase__: int = errors # how to handle errors in decoding
lowercase__: List[Any] = bytes_to_unicode()
lowercase__: str = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding='utf-8' ) as merges_handle:
lowercase__: List[str] = merges_handle.read().split('\n' )[1:-1]
lowercase__: Tuple = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__: Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
lowercase__: Dict = {}
lowercase__: Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__: Tuple = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase__: str = tuple(lowerCAmelCase__ )
lowercase__: List[str] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowercase__: Tuple = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__: Optional[int] = bigram
lowercase__: Union[str, Any] = []
lowercase__: Any = 0
while i < len(lowerCAmelCase__ ):
try:
lowercase__: List[str] = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__: Union[str, Any] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__: Optional[Any] = tuple(lowerCAmelCase__ )
lowercase__: Any = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowercase__: List[str] = get_pairs(lowerCAmelCase__ )
lowercase__: int = ' '.join(lowerCAmelCase__ )
lowercase__: int = word
return word
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
lowercase__: Dict = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
lowercase__: Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(' ' ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
lowercase__: Tuple = ''.join(lowerCAmelCase__ )
lowercase__: Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__: Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase__: Optional[int] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + '\n' )
lowercase__: List[str] = 0
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
lowercase__: str = token_index
writer.write(' '.join(lowerCAmelCase__ ) + '\n' )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: Dict = [self.cls_token_id]
lowercase__: Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: Any = [self.sep_token_id]
lowercase__: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: List[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowercase__: Dict = ' ' + text
return (text, kwargs)
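

# Illustrative sketch (added helper, not in the original file): the byte-level
# mapping defined by bytes_to_unicode above is exactly invertible, so any UTF-8
# text survives a full encode/decode round trip.
def _byte_level_round_trip(text: str) -> str:
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    encoded = "".join(byte_encoder[b] for b in text.encode("utf-8"))
    return bytearray(byte_decoder[c] for c in encoded).decode("utf-8")  # == text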
| 366 |
from collections import deque
from math import floor
from random import random
from time import time
class __a :
def __init__( self ) -> Dict:
'''simple docstring'''
lowercase__: Dict = {}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ) -> Optional[int]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase__: int = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
lowercase__: Union[str, Any] = []
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
return list(self.graph )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Union[str, Any]:
'''simple docstring'''
if s == d:
return []
lowercase__: Tuple = []
lowercase__: Tuple = []
if s == -2:
lowercase__: Any = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
lowercase__: Optional[int] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-1 ) -> List[str]:
'''simple docstring'''
if c == -1:
lowercase__: int = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> Dict:
'''simple docstring'''
lowercase__: int = deque()
lowercase__: Dict = []
if s == -2:
lowercase__: Optional[int] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
lowercase__: str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: str = []
if s == -2:
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: List[Any] = s
lowercase__: Any = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Dict = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
lowercase__: int = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[int] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: List[Any] = []
lowercase__: int = []
lowercase__: List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Union[str, Any] = []
lowercase__: List[str] = s
lowercase__: Dict = False
lowercase__: Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: List[Any] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Any = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Union[str, Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: int = s
lowercase__: str = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Any = []
lowercase__: int = []
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Optional[int] = -2
lowercase__: List[Any] = []
lowercase__: List[str] = s
lowercase__: List[Any] = False
lowercase__: str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Any = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Dict = s
lowercase__: Dict = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Dict:
'''simple docstring'''
lowercase__: Union[str, Any] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: Optional[Any] = time()
return end - begin
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[str]:
'''simple docstring'''
lowercase__: str = time()
self.bfs(lowerCAmelCase__ )
lowercase__: List[str] = time()
return end - begin
class __a :
def __init__( self ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = {}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ) -> List[Any]:
'''simple docstring'''
# check if the u exists
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase__: str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
lowercase__: Union[str, Any] = [[w, u]]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
lowercase__: str = []
lowercase__: int = []
if s == -2:
lowercase__: Tuple = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: int = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
lowercase__: Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
lowercase__: Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = deque()
lowercase__: List[Any] = []
if s == -2:
lowercase__: str = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
lowercase__: Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: str = []
lowercase__: Dict = []
lowercase__: Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Dict = []
lowercase__: List[Any] = s
lowercase__: Union[str, Any] = False
lowercase__: List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: str = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Dict = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: int = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Tuple = s
lowercase__: List[Any] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: Optional[int] = []
lowercase__: Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Tuple = -2
lowercase__: Any = []
lowercase__: int = s
lowercase__: Optional[int] = False
lowercase__: List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Union[str, Any] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Optional[Any] = s
lowercase__: Optional[int] = ss
# check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return list(self.graph )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[Any] = time()
return end - begin
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = time()
self.bfs(lowerCAmelCase__ )
lowercase__: List[str] = time()
return end - begin
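
# Illustrative usage (kept as comments; in the original TheAlgorithms source the
# two classes above are named DirectedGraph and Graph, with methods add_pair,
# all_nodes, dfs, bfs, in_degree, topological_sort -- here those names appear
# obfuscated, so this is a sketch of intent rather than runnable code):
#
#   g = DirectedGraph()
#   g.add_pair(1, 2)
#   g.add_pair(2, 3)
#   g.add_pair(1, 3)
#   g.all_nodes()      # [1, 2, 3]
#   g.dfs(1, 3)        # a DFS path from 1 to 3
#   g.bfs(1)           # BFS visit order from 1
#   g.in_degree(3)     # 2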
| 288 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__a = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__a = {
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
A : Dict = VOCAB_FILES_NAMES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["""input_ids""", """attention_mask"""]
A : str = BartTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(
__a , __a , tokenizer_file=__a , errors=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , trim_offsets=__a , **__a , )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __a ) != add_prefix_space:
lowercase : Any = getattr(__a , pre_tok_state.pop('''type''' ) )
lowercase : Dict = add_prefix_space
lowercase : Any = pre_tok_class(**__a )
lowercase : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : Tuple = '''post_processor'''
lowercase : Union[str, Any] = getattr(self.backend_tokenizer , __a , __a )
if tokenizer_component_instance:
lowercase : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
lowercase : str = tuple(state['''cls'''] )
lowercase : Optional[Any] = False
if state.get('''add_prefix_space''' , __a ) != add_prefix_space:
lowercase : int = add_prefix_space
lowercase : Dict = True
if state.get('''trim_offsets''' , __a ) != trim_offsets:
lowercase : Tuple = trim_offsets
lowercase : Dict = True
if changes_to_apply:
lowercase : int = getattr(__a , state.pop('''type''' ) )
lowercase : str = component_class(**__a )
setattr(self.backend_tokenizer , __a , __a )
@property
def __lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else value
lowercase : Tuple = value
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : str = kwargs.get('''is_split_into_words''' , __a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__a , **__a )
def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = kwargs.get('''is_split_into_words''' , __a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__a , **__a )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : List[Any] = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
lowercase : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : int = [self.sep_token_id]
lowercase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
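

# Illustrative sketch (added helper, not in the original file): what the
# mask_token setter above does -- a plain string becomes AddedToken(lstrip=True),
# so "<mask>" absorbs the space before it instead of leaving a stray "Ġ" token.
def _wrap_mask_token(value):
    return AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value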
| 337 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : str
a__ : str
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
@dataclass(frozen=UpperCamelCase__ )
class __A :
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
a__ : Optional[Union[int, float]] = None
a__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __A ( UpperCamelCase__ ):
a__ : List[InputFeatures]
def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase_ = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase_ = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Any , __a : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
def __getitem__(self : int , __a : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class __A ( UpperCamelCase__ ):
def _lowercase (self : List[Any] , __a : Dict ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" )
def _lowercase (self : Any , __a : List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" )
def _lowercase (self : Any ):
return ["contradiction", "entailment", "neutral"]
def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ):
UpperCAmelCase_ = []
for i, line in enumerate(__a ):
if i == 0:
continue
UpperCAmelCase_ = "%s-%s" % (set_type, line[0])
UpperCAmelCase_ = line[5]
UpperCAmelCase_ = line[6]
UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCAmelCase_ = line[0]
examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) )
return examples
def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )}
UpperCAmelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCAmelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE_: int ={
'hans': 3,
}
SCREAMING_SNAKE_CASE_: Any ={
'hans': HansProcessor,
}
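

# Illustrative sketch (added helper, not in the original file): the
# FileLock-guarded caching used by the torch HansDataset above, reduced to its
# core. Only one process builds the cache; concurrent processes block on the
# lock and then load the saved result.
def _load_or_build_cache(cache_path: str, build_fn):
    import torch  # assumes a torch environment, matching the Dataset branch above

    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        features = build_fn()
        torch.save(features, cache_path)
        return features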
| 1 | 0 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    """simple docstring"""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """simple docstring"""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than the length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
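# Hedged demo (added for illustration; the values below were worked out by hand
# against the implementation above — note this variant leaves z_result[0] at 0):
def _z_function_demo() -> None:
    assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    assert find_pattern("abr", "abracadabra") == 2  # "abr" starts at indices 0 and 7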
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
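# Illustrative note (file path is a placeholder): with TensorFlow and the
# transformers test dependencies installed, these suites would typically be run as
#   python -m pytest tests/models/funnel/test_modeling_tf_funnel.py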
| 272 | 1 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
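    # Expected output, since h(n) = n * (2n - 1) for n = 0 .. length - 1:
    #   [0, 1, 6, 15, 28]
    #   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]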
| 250 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def main():
    """simple docstring"""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )

                results.update(result )
return results
def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
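# Hedged usage sketch (illustrative; the task name, paths and model below are placeholders):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir /path/to/swag --output_dir ./output --do_train --do_eval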
| 161 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["""My friends are cool but they eat too many carbs."""]
    model_name = """facebook/blenderbot-400M-distill"""

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
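# Illustrative note: the @slow integration test above only executes when slow tests
# are enabled, e.g. RUN_SLOW=1 python -m pytest <path_to_this_file> (the path is a placeholder).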
| 353 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    '''simple docstring'''
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    '''simple docstring'''
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    '''simple docstring'''
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = ''

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
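if __name__ == "__main__":
    # Worked example added for illustration (the classic Wikipedia Playfair
    # demonstration), checked by hand against the rules implemented above.
    print(encode("Hide the gold in the tree stump", "playfair example"))  # BMODZBXDNABEKUDMUIXMMOUVIF
    print(decode("BMODZBXDNABEKUDMUIXMMOUVIF", "playfair example"))  # HIDETHEGOLDINTHETREXESTUMP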
| 95 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
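# Illustrative note: instantiating the deprecated class still works but warns, e.g.
#   extractor = FlavaFeatureExtractor()  # emits the FutureWarning above, then
#                                        # behaves exactly like FlavaImageProcessor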
| 57 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase__ = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    """simple docstring"""

    def _create_dummy_dataset(self):
        """simple docstring"""
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        """simple docstring"""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )
        dset.drop_index("vecs" )

    def test_add_faiss_index_from_external_arrays(self):
        """simple docstring"""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def test_serialization(self):
        """simple docstring"""
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index("vecs" , tmp_file.name )
            dset.load_faiss_index("vecs2" , tmp_file.name )
        os.unlink(tmp_file.name )

        scores, examples = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def test_drop_index(self):
        """simple docstring"""
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
        dset.drop_index("vecs" )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.float32 ) ) )

    def test_add_elasticsearch_index(self):
        """simple docstring"""
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename" , es_client=es_client )
            scores, examples = dset.get_nearest_examples("filename" , "my_name-train_29" )
            self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class FaissIndexTest(TestCase):
    """simple docstring"""

    def test_flat_ip(self):
        """simple docstring"""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores, total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )

    def test_factory(self):
        """simple docstring"""
        import faiss

        index = FaissIndex(string_factory="Flat" )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory="LSH" )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            _ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )

    def test_custom(self):
        """simple docstring"""
        import faiss

        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )

    def test_serialization(self):
        """simple docstring"""
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )


@require_faiss
def test_serialization_fs(mockfs):
    '''simple docstring'''
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = "index.faiss"
    path = f"""mock://{index_name}"""
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """simple docstring"""

    def test_elasticsearch(self):
        """simple docstring"""
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(["foo", "bar", "foobar"] )
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
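# Hedged usage sketch (illustrative, not part of the test suite; requires `faiss`):
#
#   import numpy as np
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b", "c"]})
#   ds = ds.map(lambda ex, i: {"vecs": np.full(5, i, dtype=np.float32)}, with_indices=True)
#   ds.add_faiss_index(column="vecs")
#   scores, examples = ds.get_nearest_examples("vecs", np.full(5, 2, dtype=np.float32))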
| 108 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs)

    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)

        return (out_vocab_file,)
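# Hedged usage sketch (illustrative; the checkpoint is the one named in the maps above).
# With the default non-legacy behaviour, source token ids start with the src_lang code:
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")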
| 231 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
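# Illustrative note: with the lazy structure above in place, a statement such as
#   from transformers.models.nezha import NezhaModel
# only triggers the import of the torch-backed modeling file on first access.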
| 231 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
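# Illustration (assumption): the same check as the slow test above, runnable
# standalone; it downloads xlm-roberta-base, so it needs network access.
if __name__ == "__main__":
    model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
    input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
    with torch.no_grad():
        output = model(input_ids)["last_hidden_state"]
    print(output.shape)  # expected: torch.Size([1, 12, 768])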
| 277 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
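# Illustration (assumption; file names and sizes below are made up): the shape
# of the sharding index that shard_on_the_fly writes to WEIGHTS_INDEX_NAME.
_example_index = {
    "metadata": {"total_size": 1_000_000},
    "weight_map": {
        "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
        "shared.weight": "pytorch_model-00129-of-00129.bin",
    },
}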
| 277 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Min-heap keyed by weight, with a position map for O(log n) update_key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        parent_elem, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
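# Usage sketch (illustration, not from the original file): build a small
# weighted graph and run Prim's algorithm on it.
if __name__ == "__main__":
    g = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    print(parent)  # expected: {'a': None, 'b': 'a', 'c': 'a'}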
| 192 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if number is prime (trial division up to its square root)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime among the differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 192 | 1 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any ) -> str:
__a = old_name
if "patch_embed" in old_name:
__a , __a , __a = old_name.split('''.''' )
if layer == "0":
__a = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__a = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__a = old_name.replace('''3''' , '''convolution2''' )
else:
__a = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(r'''\d\.\d''' , lowerCAmelCase__ ):
__a = r'''\b\d{2}\b'''
if bool(re.search(lowerCAmelCase__ , lowerCAmelCase__ ) ):
__a = re.search(r'''\d\.\d\d.''' , lowerCAmelCase__ ).group()
else:
__a = re.search(r'''\d\.\d.''' , lowerCAmelCase__ ).group()
if int(match[0] ) < 6:
__a = old_name.replace(lowerCAmelCase__ , '''''' )
__a = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__a = '''intermediate_stages.''' + trimmed_name
else:
__a = old_name.replace(lowerCAmelCase__ , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__a = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__a = str(int(match[2] ) - num_meta4D_last_stage )
__a = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__a = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__a = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__a = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__a = trimmed_name.replace('''fc2''' , '''linear_out''' )
__a = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(r'''.\d.''' , lowerCAmelCase__ ):
__a = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__a = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__a = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__a = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__a = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__a = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__a = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__a = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__a = new_name.replace('''norm''' , '''layernorm''' )
__a = '''efficientformer.''' + new_name
else:
__a = '''efficientformer.encoder.''' + new_name
return new_name
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple ) -> Tuple:
for key in checkpoint.copy().keys():
__a = checkpoint.pop(lowerCAmelCase__ )
__a = val
return checkpoint
def lowercase ( ) -> Any:
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return image
def lowercase ( lowerCAmelCase__ : Path , lowerCAmelCase__ : Path , lowerCAmelCase__ : Path , lowerCAmelCase__ : bool ) -> Any:
__a = torch.load(lowerCAmelCase__ , map_location='''cpu''' )['''model''']
__a = EfficientFormerConfig.from_json_file(lowerCAmelCase__ )
__a = EfficientFormerForImageClassificationWithTeacher(lowerCAmelCase__ )
__a = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__a = config.depths[-1] - config.num_metaad_blocks + 1
__a = convert_torch_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
__a = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__a = prepare_img()
__a = 256
__a = 224
__a = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__a = processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__a = Compose(
[
Resize(lowerCAmelCase__ , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(lowerCAmelCase__ ),
ToTensor(),
Normalize(lowerCAmelCase__ , lowerCAmelCase__ ),
] )
__a = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
__a = model(lowerCAmelCase__ )
__a = outputs.logits
__a = (1, 1000)
if "l1" in model_name:
__a = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10] , lowerCAmelCase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__a = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10] , lowerCAmelCase__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__a = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(lowerCAmelCase__ )
print(f'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=lowerCAmelCase__ , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=lowerCAmelCase__ , )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
lowercase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
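# Example invocation (illustration; the script and checkpoint file names below
# are placeholders, not verified against the repository):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub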
| 45 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A_ , 'embed_dim' ) )
self.parent.assertTrue(hasattr(A_ , 'num_heads' ) )
class lowercase :
def __init__( self , A_ , A_=13 , A_=64 , A_=3 , A_=[16, 48, 96] , A_=[1, 3, 6] , A_=[1, 2, 10] , A_=[7, 3, 3] , A_=[4, 2, 2] , A_=[2, 1, 1] , A_=[2, 2, 2] , A_=[False, False, True] , A_=[0.0, 0.0, 0.0] , A_=0.02 , A_=1e-12 , A_=True , A_=True , A_=2 , ) -> int:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_sizes
UpperCamelCase = patch_stride
UpperCamelCase = patch_padding
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = num_labels
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = num_heads
UpperCamelCase = stride_kv
UpperCamelCase = depth
UpperCamelCase = cls_token
UpperCamelCase = attention_drop_rate
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Any:
"""simple docstring"""
UpperCamelCase = CvtModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ )
UpperCamelCase = (self.image_size, self.image_size)
UpperCamelCase , UpperCamelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCamelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCamelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = CvtForImageClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[str] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__lowercase : Tuple = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Union[str, Any] = False
__lowercase : Optional[Any] = False
__lowercase : List[str] = False
__lowercase : Dict = False
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = CvtModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
return
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(A_ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(A_ , A_ , A_ ):
UpperCamelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(A_ , A_ ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = len(self.model_tester.depth )
self.assertEqual(len(A_ ) , A_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(A_ , A_ , A_ )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
pass
@slow
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = CvtModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A_ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**A_ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
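# Note (derivation, not in the original test): the height/width bookkeeping in
# the model-shape check above follows the standard convolution output-size
# formula. With the tester defaults (image_size 64, patch size 7, stride 4,
# padding 2), the first stage yields:
assert floor((64 + 2 * 2 - 7) / 4) + 1 == 16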
| 222 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class UpperCAmelCase_ :
'''simple docstring'''
A : int = {}
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "root" , _SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
snake_case_ : Any = name
snake_case_ : List[str] = level
snake_case_ : str = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
snake_case_ : Any = copy.deepcopy(_SCREAMING_SNAKE_CASE )
snake_case_ : List[str] = copy.deepcopy(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : str = Config(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE , level=level + 1 )
snake_case_ : Dict = v
setattr(self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = d
def __repr__( self ) -> Optional[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ : List[str] = val
snake_case_ : str = val
snake_case_ : Tuple = key.split("." )
snake_case_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) - 1
snake_case_ : Union[str, Any] = self._pointer
if len(_SCREAMING_SNAKE_CASE ) > 1:
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if hasattr(self , _SCREAMING_SNAKE_CASE ) and isinstance(getattr(self , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ):
setattr(getattr(self , _SCREAMING_SNAKE_CASE ) , ".".join(levels[i:] ) , _SCREAMING_SNAKE_CASE )
if l == last_level:
snake_case_ : Optional[int] = val
else:
snake_case_ : Union[str, Any] = pointer[l]
def _lowerCAmelCase ( self ) -> str:
return self._pointer
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
with open(f'''{file_name}''' , "w" ) as stream:
dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
with open(f'''{file_name}''' , "w" ) as stream:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@staticmethod
def _lowerCAmelCase ( _SCREAMING_SNAKE_CASE ) -> Any:
with open(_SCREAMING_SNAKE_CASE ) as stream:
snake_case_ : Optional[int] = load(_SCREAMING_SNAKE_CASE , Loader=_SCREAMING_SNAKE_CASE )
return data
def __str__( self ) -> List[Any]:
snake_case_ : int = " "
if self._name != "root":
snake_case_ : Optional[Any] = f'''{t * (self._level-1)}{self._name}:\n'''
else:
snake_case_ : Any = ""
snake_case_ : Optional[int] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(_SCREAMING_SNAKE_CASE ).__name__})\n'''
snake_case_ : List[str] = level
return r[:-1]
@classmethod
def _lowerCAmelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ , snake_case_ : str = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return cls(_SCREAMING_SNAKE_CASE )
@classmethod
def _lowerCAmelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ : str = kwargs.pop("cache_dir" , _SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = kwargs.pop("force_download" , _SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = kwargs.pop("resume_download" , _SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = kwargs.pop("proxies" , _SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = kwargs.pop("local_files_only" , _SCREAMING_SNAKE_CASE )
if os.path.isdir(_SCREAMING_SNAKE_CASE ):
snake_case_ : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif os.path.isfile(_SCREAMING_SNAKE_CASE ) or is_remote_url(_SCREAMING_SNAKE_CASE ):
snake_case_ : Tuple = pretrained_model_name_or_path
else:
snake_case_ : Tuple = hf_bucket_url(_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , use_cdn=_SCREAMING_SNAKE_CASE )
try:
# Load from URL or cache if already cached
snake_case_ : Tuple = cached_path(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
snake_case_ : List[Any] = Config.load_yaml(_SCREAMING_SNAKE_CASE )
except EnvironmentError:
snake_case_ : str = "Can't load config for"
raise EnvironmentError(_SCREAMING_SNAKE_CASE )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(_SCREAMING_SNAKE_CASE ), kwargs
def lowerCAmelCase__ ( _a : Optional[Any] ):
snake_case_ : Optional[Any] = torch.load("dump.pt" , map_location=in_tensor.device )
snake_case_ : Dict = in_tensor.numpy()
snake_case_ : List[str] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_a , _a , rtol=0.01 , atol=0.1 ), (
F'''{sum([1 for x in np.isclose(_a , _a , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowerCAmelCase__ ( _a : List[Any] ):
snake_case_ : int = urlparse(_a )
return parsed.scheme in ("http", "https")
def lowerCAmelCase__ ( _a : str , _a : str , _a : Any=True ):
snake_case_ : List[str] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
snake_case_ : Optional[int] = "/" not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def lowerCAmelCase__ ( _a : Optional[int] , _a : Dict , _a : str=None , _a : List[str]=0 , _a : Optional[int]=None , ):
snake_case_ : Union[str, Any] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_a , _a ):
ua += "; " + "; ".join("{}/{}".format(_a , _a ) for k, v in user_agent.items() )
elif isinstance(_a , _a ):
ua += "; " + user_agent
snake_case_ : List[str] = {"user-agent": ua}
if resume_size > 0:
snake_case_ : Optional[Any] = "bytes=%d-" % (resume_size,)
snake_case_ : List[Any] = requests.get(_a , stream=_a , proxies=_a , headers=_a )
if response.status_code == 4_16: # Range not satisfiable
return
snake_case_ : List[Any] = response.headers.get("Content-Length" )
snake_case_ : Dict = resume_size + int(_a ) if content_length is not None else None
snake_case_ : Union[str, Any] = tqdm(
unit="B" , unit_scale=_a , total=_a , initial=_a , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_a ) )
temp_file.write(_a )
progress.close()
def lowerCAmelCase__ ( _a : Any , _a : Optional[int]=None , _a : str=False , _a : Tuple=None , _a : str=10 , _a : Optional[Any]=False , _a : str=None , _a : Optional[Any]=False , ):
if cache_dir is None:
snake_case_ : Tuple = TRANSFORMERS_CACHE
if isinstance(_a , _a ):
snake_case_ : Any = str(_a )
os.makedirs(_a , exist_ok=_a )
snake_case_ : str = None
if not local_files_only:
try:
snake_case_ : List[str] = requests.head(_a , allow_redirects=_a , proxies=_a , timeout=_a )
if response.status_code == 2_00:
snake_case_ : int = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
snake_case_ : Tuple = url_to_filename(_a , _a )
# get cache path to put the file
snake_case_ : str = os.path.join(_a , _a )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_a ):
return cache_path
else:
snake_case_ : Optional[Any] = [
file
for file in fnmatch.filter(os.listdir(_a ) , filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(_a ) > 0:
return os.path.join(_a , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(_a ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
snake_case_ : List[str] = cache_path + ".lock"
with FileLock(_a ):
# If the download just completed while the lock was activated.
if os.path.exists(_a ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
snake_case_ : Optional[int] = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(_a , "a+b" ) as f:
yield f
snake_case_ : Tuple = _resumable_file_manager
if os.path.exists(_a ):
snake_case_ : Optional[Any] = os.stat(_a ).st_size
else:
snake_case_ : Union[str, Any] = 0
else:
snake_case_ : Dict = partial(tempfile.NamedTemporaryFile , dir=_a , delete=_a )
snake_case_ : List[str] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" , _a , temp_file.name , )
http_get(
_a , _a , proxies=_a , resume_size=_a , user_agent=_a , )
os.replace(temp_file.name , _a )
snake_case_ : Union[str, Any] = {"url": url, "etag": etag}
snake_case_ : Optional[int] = cache_path + ".json"
with open(_a , "w" ) as meta_file:
json.dump(_a , _a )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowerCAmelCase__ ( _a : Dict , _a : Optional[int]=None , _a : List[Any]=False , _a : List[str]=None , _a : Union[str, Any]=False , _a : Any=None , _a : Union[str, Any]=False , _a : Optional[Any]=False , _a : str=False , ):
if cache_dir is None:
snake_case_ : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(_a , _a ):
snake_case_ : Dict = str(_a )
if isinstance(_a , _a ):
snake_case_ : Any = str(_a )
if is_remote_url(_a ):
# URL, so get it from the cache (downloading if necessary)
snake_case_ : Tuple = get_from_cache(
_a , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , user_agent=_a , local_files_only=_a , )
elif os.path.exists(_a ):
# File, and it exists.
snake_case_ : List[Any] = url_or_filename
elif urlparse(_a ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(_a ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(_a ) )
if extract_compressed_file:
if not is_zipfile(_a ) and not tarfile.is_tarfile(_a ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
snake_case_ , snake_case_ : Any = os.path.split(_a )
snake_case_ : Union[str, Any] = output_file.replace("." , "-" ) + "-extracted"
snake_case_ : Optional[int] = os.path.join(_a , _a )
if os.path.isdir(_a ) and os.listdir(_a ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
snake_case_ : Optional[Any] = output_path + ".lock"
with FileLock(_a ):
shutil.rmtree(_a , ignore_errors=_a )
os.makedirs(_a )
if is_zipfile(_a ):
with ZipFile(_a , "r" ) as zip_file:
zip_file.extractall(_a )
zip_file.close()
elif tarfile.is_tarfile(_a ):
snake_case_ : int = tarfile.open(_a )
tar_file.extractall(_a )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(_a ) )
return output_path_extracted
return output_path
def lowerCAmelCase__ ( _a : Any , _a : str="," ):
assert isinstance(_a , _a )
if os.path.isfile(_a ):
with open(_a ) as f:
snake_case_ : List[str] = eval(f.read() )
else:
snake_case_ : Any = requests.get(_a )
try:
snake_case_ : List[Any] = requests.json()
except Exception:
snake_case_ : str = req.content.decode()
assert data is not None, "could not connect"
try:
snake_case_ : Optional[Any] = eval(_a )
except Exception:
snake_case_ : str = data.split("\n" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def lowerCAmelCase__ ( _a : Dict ):
snake_case_ : List[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_a )
with open(_a , "rb" ) as stream:
snake_case_ : Tuple = pkl.load(_a )
snake_case_ : Tuple = weights.pop("model" )
snake_case_ : Optional[Any] = {}
for k, v in model.items():
snake_case_ : Optional[int] = torch.from_numpy(_a )
if "running_var" in k:
snake_case_ : str = torch.tensor([0] )
snake_case_ : Tuple = k.replace("running_var" , "num_batches_tracked" )
snake_case_ : Tuple = zero
return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowerCAmelCase__ ( _a : Union[str, Any] , _a : List[str]="RGB" ):
assert isinstance(_a , _a )
if os.path.isfile(_a ):
snake_case_ : str = cva.imread(_a )
else:
snake_case_ : Dict = get_image_from_url(_a )
assert img is not None, F'''could not connect to: {im}'''
snake_case_ : Optional[Any] = cva.cvtColor(_a , cva.COLOR_BGR2RGB )
if input_format == "RGB":
snake_case_ : List[str] = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
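# Usage sketch (illustration, not from the original file): the cache naming
# scheme implemented by url_to_filename above, i.e. sha256(url) plus an
# optional sha256(etag) suffix.
_example_cache_name = url_to_filename("https://example.com/weights.bin", etag='"abc123"')
assert _example_cache_name.count(".") == 1  # url hash, dot, etag hash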
| 36 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36 | 1 |
from math import factorial
def solution(n: int = 20) -> int:
    """Return the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
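# Note (derivation, not in the original): for the default n = 20 the result is
# the central binomial coefficient C(40, 20), i.e. the number of monotone
# lattice paths through a 20 x 20 grid.
assert solution(20) == 137_846_528_820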
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 236 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 236 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
SCREAMING_SNAKE_CASE__ : List[str] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
SCREAMING_SNAKE_CASE__ : Dict = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 358 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase__ :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = encoder_seq_length
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = d_ff
__lowerCamelCase = relative_attention_num_buckets
__lowerCamelCase = dropout_rate
__lowerCamelCase = initializer_factor
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = None
__lowerCamelCase = decoder_layers
def __A ( self : Any ) -> Tuple:
return TaConfig.from_pretrained('''google/umt5-base''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]:
if attention_mask is None:
__lowerCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowerCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if decoder_head_mask is None:
__lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if cross_attn_head_mask is None:
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad tokens in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
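    # Worked sketch of the clamp above (values are illustrative): with
    # pad_token_id = 0, input_ids.clamp(1) maps tensor([0, 3, 98]) to
    # tensor([1, 3, 98]), so no position in the batch ever carries the pad id.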
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
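    # Layout sketch of the cache checked above (shapes stated as an assumption
    # based on the assertions): past_key_values holds config.num_layers entries,
    # each a 4-tuple (self_attn_key, self_attn_value, cross_attn_key,
    # cross_attn_value) of tensors shaped (batch_size, num_heads, seq_len, head_dim).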
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 339 | 0 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def UpperCamelCase_( snake_case : Tuple , snake_case : Dict ):
'''simple docstring'''
snake_case_ = Mock()
snake_case_ = conn, Mock()
snake_case_ = iter([1, None] )
snake_case_ = lambda snake_case : next(snake_case )
# ===== invoke =====
send_file(filename="mytext.txt" , testing=snake_case )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 85 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replaces curr_string in the lexicon with its "0" and "1" extensions."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
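# Worked example of one call (values traced by hand): starting from
# lexicon = {"0": "0", "1": "1"} with curr_string = "1", index = 2 and
# last_match_id = "1", the steps above pop "1", set lexicon["10"] = "1",
# prefix every stored value with "0" because log2(2) is an integer, and
# finally set lexicon["11"] = "10", leaving {"0": "00", "10": "01", "11": "10"}.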
def compress_data(data_bits: str) -> str:
    """Compresses the given bit string with a Lempel-Ziv-style dictionary coder."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends the source file's length (Elias-gamma coded) to the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string to the file, padding the last byte with a "1" marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, compresses it and writes the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
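# A hedged usage sketch (file names are illustrative assumptions):
#
#     python lempel_ziv.py example.txt example.lz
#
# The header written by add_file_length is an Elias-gamma-style code: for a
# 5-byte source file, bin(5)[2:] == "101" (3 bits), so "0" * (3 - 1) == "00"
# is prepended, giving the header "00101" followed by the compressed payload.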
| 254 | 0 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # maps each placeholder token to the list of sub-tokens it expands into
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
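# A hedged usage sketch (the checkpoint name and prompt are illustrative
# assumptions, not part of this module):
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     # "<cat-toy>" now expands to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"
#     ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True).input_ids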
| 368 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('tiny'):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small'):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base'):
        pass
    elif deit_name[4:].startswith('large'):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
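# A hedged invocation sketch (the output path is an illustrative assumption;
# the model name is the script's documented default):
#
#     python convert_deit_timm_to_pytorch.py \
#         --deit_name vit_deit_base_distilled_patch16_224 \
#         --pytorch_dump_folder_path ./deit-base-distilled-patch16-224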
| 118 | 0 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedily take the longest substring starting at `start` that is in the vocab
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
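# Worked example of the greedy longest-match loop above (the vocabulary is an
# illustrative assumption): with vocab = {"ab": 0, "a": 1, "c": 2} and
# unk_token = "<unk>", tokenize("abc") yields ["ab", "c"], while
# tokenize("abd") yields ["ab", "<unk>"] because no substring starting at "d"
# is in the vocab.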
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder['\n']

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
            )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!'
                    )
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 161 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1_348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
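    # In the notation of Song et al. (2021), the predictor step above
    # discretizes the reverse-time SDE (their Eq. 6):
    #     x_{i-1} = x_i + (sigma_i^2 - sigma_{i-1}^2) * s_theta(x_i, sigma_i)
    #                   + sqrt(sigma_i^2 - sigma_{i-1}^2) * z,   z ~ N(0, I)
    # Here `diffusion` is sqrt(sigma_i^2 - sigma_{i-1}^2) and `drift` is
    # -diffusion**2 * model_output, so prev_sample_mean = sample - drift
    # matches the first two terms.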
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 161 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''MIT/ast-finetuned-audioset-10-10-0.4593''': (
        '''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = 'audio-spectrogram-transformer'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
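# A hedged sketch of how the strides interact with the spectrogram shape (the
# formula mirrors the patch-grid computation in the AST embedding layer and is
# stated here as an assumption): with the defaults above,
#     frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # (128 - 16) // 10 + 1 = 12
#     time_out      = (max_length - patch_size) // time_stride + 1        # (1024 - 16) // 10 + 1 = 101
# giving 12 * 101 = 1212 patches per input.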
| 27 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
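# A sketch of the layouts produced above (BERT-style single/pair encoding):
#     single sequence:   <s> A </s>           token_type_ids 0 throughout
#     pair of sequences: <s> A </s> B </s>    0 over "<s> A </s>", 1 over "B </s>"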
| 27 | 1 |