# fmt: off
MORSE_CODE_DICT = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
    'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
    'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
    'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
    '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
    '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
    ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
    '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
    '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
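# For reference, a quick round trip through the functions above (a sanity check
# derived from the dictionary, not part of the original script):
#   encrypt("Morse code here!") == "-- --- .-. ... . / -.-. --- -.. . / .... . .-. . -.-.--"
#   decrypt(encrypt("Morse code here!")) == "MORSE CODE HERE!"  (case is not preserved)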
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
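# Minimal usage sketch for the extractor exercised above (assumes transformers and
# torchaudio are installed; the silent input is illustrative). The output shape
# matches the integration test: (batch, 1024 frames, 128 mel bins).
#
#     from transformers import ASTFeatureExtractor
#
#     fe = ASTFeatureExtractor()
#     inputs = fe([0.0] * 16000, sampling_rate=16000, return_tensors="np")
#     print(inputs.input_values.shape)  # (1, 1024, 128)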
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
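# Usage sketch: this builder is what `load_dataset("csv", ...)` dispatches to; extra
# keyword arguments flow into CsvConfig (the file path is illustrative):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("csv", data_files="data/train.csv", sep=",")
#     print(ds["train"].features)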
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """Text decoder that maps a (projected) embedding prefix back to text with a GPT-2 language model head."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generates captions for one feature at a time via beam search."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
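# Instantiation sketch (dimensions are illustrative, chosen to satisfy the
# constructor check above: prefix_inner_dim != n_embd requires prefix_hidden_dim):
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=512, prefix_hidden_dim=64)
#     tokens = torch.randint(0, 50257, (2, 10))
#     prefix = torch.randn(2, 77, 512)
#     out, hidden = decoder(tokens, prefix, labels=tokens)  # `hidden` returned since prefix_hidden_dim is set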
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
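# Quick reference distilled from the offsets assertions above (for the second
# "hello" in "hello hello", len("hello") == 5; values derived from the test):
#   trim_offsets=True  -> offsets (6, 11): the leading space is excluded
#   trim_offsets=False -> offsets (5, 11): the leading space is included
#   add_prefix_space only shifts offsets once the text itself starts with a space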
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
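# Usage sketch: `load_dataset("json", ...)` dispatches to this builder. JSON Lines
# files are streamed chunk by chunk; a single JSON document whose records live
# under a key needs `field`, e.g. for {"data": [{...}, {...}]} (path illustrative):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("json", data_files="data/train.json", field="data")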
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
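# The size_divisor assertions above hold because GLPN resizing rounds each spatial
# dimension down to the nearest multiple of size_divisor. Illustrative arithmetic:
#   height, width = 500, 640 and size_divisor = 32
#   500 // 32 * 32 == 480 and 640 // 32 * 32 == 640, so the output is 480 x 640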
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
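# Typical call pattern (a sketch; the argument names are illustrative): pops the
# deprecated kwarg if present, emits a FutureWarning, and returns its value.
#
#     scale = deprecate("scale", "1.0.0", "Use `cross_attention_kwargs` instead.", take_from=kwargs)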
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
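# Minimal pipeline usage sketch, mirroring the slow test above (model and URL as
# used in the test; `depth` is a PIL image, `predicted_depth` a torch.Tensor):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"].save("depth.png")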
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-processes the keys of a base Flax checkpoint to PyTorch conventions."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
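# Illustrative sizing arithmetic for the shard split above (not from the original
# script): convert_file_size_to_int("10GB") is 10 * 10**9 bytes and bfloat16
# weights take 2 bytes each, so a shard holds on the order of 5e9 parameters
# before the `current_block_size + weight_size > max_shard_size` test forces a split.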
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
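# Usage sketch (grid and expected count follow the classic 8-connectivity example):
#
#     grid = [
#         [1, 1, 0, 0, 0],
#         [0, 1, 0, 0, 1],
#         [1, 0, 0, 1, 1],
#         [0, 0, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#     ]
#     g = Graph(5, 5, grid)
#     print(g.count_islands())  # 5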
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def UpperCamelCase (lowercase_: Dict , lowercase_: List[str] , lowercase_: Optional[Any] ) -> Optional[int]:
A__ : Any = []
if task == "s2t":
A__ : Any = hf_model.speechta.encoder.prenet.feature_encoder
A__ : Optional[Any] = MAPPING_S2T
A__ : List[Any] = IGNORE_KEYS_S2T
elif task == "t2s":
A__ : Any = None
A__ : Optional[int] = MAPPING_T2S
A__ : Optional[int] = IGNORE_KEYS_T2S
elif task == "s2s":
A__ : Tuple = hf_model.speechta.encoder.prenet.feature_encoder
A__ : List[str] = MAPPING_S2S
A__ : Dict = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(lowercase_ , lowercase_ ):
logger.info(f"""{name} was ignored""" )
continue
A__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == """group""" , )
A__ : Any = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
A__ , A__ : Tuple = key.split(""".*.""" )
if prefix in name and suffix in name:
A__ : Optional[Any] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
A__ : Dict = True
if "*" in mapped_key:
A__ : int = name.split(lowercase_ )[0].split(""".""" )[-2]
A__ : List[str] = mapped_key.replace("""*""" , lowercase_ )
if "weight_g" in name:
A__ : Optional[Any] = """weight_g"""
elif "weight_v" in name:
A__ : List[Any] = """weight_v"""
elif "bias" in name:
A__ : int = """bias"""
elif "weight" in name:
A__ : Any = """weight"""
elif "running_mean" in name:
A__ : Optional[Any] = """running_mean"""
elif "running_var" in name:
A__ : Tuple = """running_var"""
elif "num_batches_tracked" in name:
A__ : Dict = """num_batches_tracked"""
else:
A__ : Union[str, Any] = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Any , lowercase_: Optional[Any] , lowercase_: str , lowercase_: Any ) -> List[str]:
A__ : int = full_name.split("""conv_layers.""" )[-1]
A__ : Optional[int] = name.split(""".""" )
A__ : Any = int(items[0] )
A__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
A__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
A__ : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
A__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
A__ : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int , lowercase_: str , lowercase_: List[Any]=None , lowercase_: Tuple=None , lowercase_: Dict=None , ) -> Union[str, Any]:
if config_path is not None:
A__ : Tuple = SpeechTaConfig.from_pretrained(lowercase_ )
else:
A__ : Tuple = SpeechTaConfig()
if task == "s2t":
A__ : Optional[int] = config.max_text_positions
A__ : Any = SpeechTaForSpeechToText(lowercase_ )
elif task == "t2s":
A__ : List[Any] = 1876
A__ : str = 600
A__ : List[Any] = config.max_speech_positions
A__ : Tuple = SpeechTaForTextToSpeech(lowercase_ )
elif task == "s2s":
A__ : Dict = 1876
A__ : int = config.max_speech_positions
A__ : Any = SpeechTaForSpeechToSpeech(lowercase_ )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
A__ : Any = SpeechTaTokenizer(lowercase_ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
A__ : Optional[Any] = AddedToken("""<mask>""" , lstrip=lowercase_ , rstrip=lowercase_ )
A__ : List[Any] = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
A__ : Dict = SpeechTaFeatureExtractor()
A__ : List[Any] = SpeechTaProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(lowercase_ )
A__ : List[Any] = torch.load(lowercase_ )
recursively_load_weights(fairseq_checkpoint["""model"""] , lowercase_ , lowercase_ )
model.save_pretrained(lowercase_ )
if repo_id:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
A_ : List[str] = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
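The `.*` wildcard convention used by the mapping tables and `should_ignore` above can be isolated into a small helper; this sketch (the helper name is made up) mirrors the two matching rules: a trailing `.*` means prefix match, and an inner `.*.` means prefix-and-suffix match.
def wildcard_matches(key: str, name: str) -> bool:
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name
assert wildcard_matches("text_decoder_prenet.*", "text_decoder_prenet.embed_tokens.weight")
assert wildcard_matches("encoder.layers.*.fc1", "encoder.layers.3.fc1.weight")
assert not wildcard_matches("encoder.layers.*.fc1", "decoder.layers.3.fc1.weight")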
| 64 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict: dict) -> None:
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict: dict) -> dict:
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # `root` defaults to the current directory so the one-argument call below keeps working.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A_ : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
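The `make_linear_from_emb` helper above is the usual weight-tying idiom: the LM head is a bias-free linear layer that reuses the token-embedding matrix. A self-contained sketch with made-up sizes:
import torch
from torch import nn
emb = nn.Embedding(100, 16)                     # (vocab_size, d_model)
lm_head = nn.Linear(16, 100, bias=False)        # hidden states -> vocab logits
lm_head.weight = emb.weight                     # share one Parameter between both modules
logits = lm_head(emb(torch.tensor([1, 2, 3])))  # (3, 100)
assert logits.shape == (3, 100)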
| 64 | 1 |
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
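A few sanity checks for the three functions above; rates are fractions (0.05 means 5%), and the expected values below are computed from the formulas, not quoted from any reference.
assert abs(simple_interest(1000, 0.0005, 30) - 15.0) < 1e-9
assert abs(compound_interest(1000, 0.05, 3) - 157.625) < 1e-6
assert 51.2 < apr_interest(1000, 0.05, 1) < 51.3  # ~5.13% effective with daily compounding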
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
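The seeded `torch.Generator` pattern used throughout these tests is what makes the sampling reproducible; in isolation it looks like this.
import torch
g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=g1), torch.randn(4, generator=g2))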
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_graphormer'] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
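For non-negative integers the shift loop above agrees with the built-in `int.bit_length`, which makes a handy cross-check:
for n in (0, 1, 2, 5, 1024):
    assert get_highest_set_bit_position(n) == n.bit_length()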
| 64 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase (lowercase_: List[str] , lowercase_: Union[str, Any] , lowercase_: Optional[Any] , lowercase_: int ) -> Optional[Any]:
# Initialise PyTorch model
A__ : Dict = FunnelConfig.from_json_file(lowercase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
A__ : Optional[Any] = FunnelBaseModel(lowercase_ ) if base_model else FunnelModel(lowercase_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(lowercase_ , lowercase_ , lowercase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowercase_ )
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
A_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 64 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
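What `cv2.getAffineTransform` computes, sketched with plain NumPy: the unique 2x3 matrix that maps three source points onto three destination points (the points are reused from the script above).
import numpy as np
src = np.array([[50, 50], [200, 50], [50, 200]], np.float64)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float64)
A = np.hstack([src, np.ones((3, 1))])  # each row is [x, y, 1]
M = np.linalg.solve(A, dst).T          # the 2x3 affine matrix
assert np.allclose(M @ np.array([50, 50, 1.0]), [10, 100])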
| 64 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A_ : List[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file."
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file."
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`."
    )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup."
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False."
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub."
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
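The `subparsers`-or-standalone construction above is a common way to let one builder serve both a subcommand (`accelerate tpu-config ...`) and a standalone CLI; a minimal sketch with a made-up command:
import argparse
def build_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("greet")
    else:
        parser = argparse.ArgumentParser("greet")
    parser.add_argument("--name", default="world")
    if subparsers is not None:
        parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser
args = build_parser().parse_args(["--name", "tpu"])
assert args.name == "tpu"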
| 64 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Dict = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__: Optional[Any] = '''BlipImageProcessor'''
UpperCAmelCase__: str = '''AutoTokenizer'''
def __init__( self , A__ , A__ ):
A__ : List[Any] = False
super().__init__(A__ , A__ )
A__ : Optional[Any] = self.image_processor
def __call__( self , A__ = None , A__ = None , A__ = True , A__ = False , A__ = None , A__ = None , A__ = 0 , A__ = None , A__ = None , A__ = False , A__ = False , A__ = False , A__ = False , A__ = False , A__ = True , A__ = None , **A__ , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A__ : Optional[int] = self.tokenizer
A__ : int = self.tokenizer(
text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_token_type_ids=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , )
return text_encoding
# add pixel_values
A__ : Union[str, Any] = self.image_processor(A__ , return_tensors=A__ )
if text is not None:
A__ : Tuple = self.tokenizer(
text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_token_type_ids=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , )
else:
A__ : List[str] = None
if text_encoding is not None:
encoding_image_processor.update(A__ )
return encoding_image_processor
def __A ( self , *A__ , **A__ ):
return self.tokenizer.batch_decode(*A__ , **A__ )
def __A ( self , *A__ , **A__ ):
return self.tokenizer.decode(*A__ , **A__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __A ( self ):
A__ : str = self.tokenizer.model_input_names
A__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
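A sketch of how the custom `IGNORE_RESULT` flag behaves end to end: with the directive set on an example, any expected output is accepted. The demo registers the flag itself so it runs standalone (`register_optionflag` returns the existing value when the name is already known).
import doctest
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return super().check_output(want, got, optionflags)
def add(a, b):
    """
    >>> add(2, 2)  # doctest: +IGNORE_RESULT
    'any expected output passes under the flag'
    """
    return a + b
runner = doctest.DocTestRunner(checker=IgnoreResultChecker())
for test in doctest.DocTestFinder().find(add):
    runner.run(test)
assert runner.failures == 0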
| 64 | 1 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
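As a sanity check, 1406357289 from the Project Euler 43 statement satisfies every substring-divisibility property tested above:
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))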
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
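The integration test above reduces to a tokenize / generate / decode round trip. A minimal standalone sketch of that flow, assuming network access to the google/pegasus-xsum checkpoint (this is an illustration, not part of the test suite):
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")

# Tokenize a batch, run beam search, then decode back to text.
batch = tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    padding=True,
    return_tensors="tf",
)
summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))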
| 64 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A_ : Dict = logging.get_logger(__name__)
A_ : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
A_ : Any = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
A_ : Union[str, Any] = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: str = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__: List[str] = RoFormerTokenizer
def __init__( self , A__=None , A__=None , A__=True , A__="[UNK]" , A__="[SEP]" , A__="[PAD]" , A__="[CLS]" , A__="[MASK]" , A__=True , A__=None , **A__ , ):
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
A__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , A__ ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , A__ ) != strip_accents
):
A__ : Dict = getattr(A__ , pre_tok_state.pop("""type""" ) )
A__ : int = do_lower_case
A__ : str = strip_accents
A__ : int = pre_tok_class(**A__ )
A__ : List[str] = do_lower_case
def __getstate__( self ):
A__ : Union[str, Any] = self.__dict__.copy()
A__ : Tuple = BertPreTokenizer()
return state
def __setstate__( self , A__ ):
A__ : List[Any] = d
A__ : str = self.__dict__["""_tokenizer"""].get_vocab()
A__ : Optional[int] = PreTokenizer.custom(JiebaPreTokenizer(A__ ) )
def __A ( self , A__ , A__=None ):
A__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , A__ , A__ = None ):
A__ : Dict = [self.sep_token_id]
A__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A__ , A__ = None ):
A__ : str = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def __A ( self , A__ , A__=None , A__=None , A__=False , **A__ , ):
A__ : str = BertPreTokenizer()
return super().save_pretrained(A__ , A__ , A__ , A__ , **A__ )
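For reference, the PreTokenizer.custom hook used in __setstate__ above accepts any object exposing a pre_tokenize(self, pretok) method, as JiebaPreTokenizer does. A minimal sketch of that pattern with a toy hyphen splitter standing in for the Jieba-based one (the class and its names are illustrative, not part of the library):
from tokenizers import NormalizedString, PreTokenizedString
from tokenizers.pre_tokenizers import PreTokenizer

class HyphenPreTokenizer:
    # Split each pre-token at hyphens, mirroring the JiebaPreTokenizer pattern above.
    def hyphen_split(self, i: int, normalized: NormalizedString) -> list[NormalizedString]:
        text = str(normalized)
        splits, start = [], 0
        for idx, ch in enumerate(text):
            if ch == "-":
                splits.append(normalized[start:idx])
                start = idx + 1
        splits.append(normalized[start:len(text)])
        return [s for s in splits if len(str(s)) > 0]

    def pre_tokenize(self, pretok: PreTokenizedString) -> None:
        pretok.split(self.hyphen_split)

custom_pre_tokenizer = PreTokenizer.custom(HyphenPreTokenizer())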
| 64 |
class EditDistance:
    """
    Dynamic-programming edit distance, via a memoized top-down recursion and an
    iterative bottom-up table.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
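Quick check of the class above on the classic instance (the edit distance between "intention" and "execution" is 5); both formulations should agree:
solver = EditDistance()
assert solver.min_dist_top_down("intention", "execution") == 5
assert solver.min_dist_bottom_up("intention", "execution") == 5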
| 64 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=400 , A__=True , A__=None , A__=True , A__=False , A__=True , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , ):
A__ : int = parent
A__ : Optional[Any] = batch_size
A__ : Tuple = num_channels
A__ : Dict = image_size
A__ : str = min_resolution
A__ : List[str] = max_resolution
A__ : Tuple = do_resize
A__ : Tuple = size if size is not None else {"""height""": 18, """width""": 20}
A__ : Optional[Any] = do_thumbnail
A__ : int = do_align_axis
A__ : List[str] = do_pad
A__ : List[str] = do_normalize
A__ : Any = image_mean
A__ : List[Any] = image_std
def __A ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Tuple = DonutImageProcessor if is_vision_available() else None
def __A ( self ):
A__ : Optional[int] = DonutImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
A__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , """do_resize""" ) )
self.assertTrue(hasattr(A__ , """size""" ) )
self.assertTrue(hasattr(A__ , """do_thumbnail""" ) )
self.assertTrue(hasattr(A__ , """do_align_long_axis""" ) )
self.assertTrue(hasattr(A__ , """do_pad""" ) )
self.assertTrue(hasattr(A__ , """do_normalize""" ) )
self.assertTrue(hasattr(A__ , """image_mean""" ) )
self.assertTrue(hasattr(A__ , """image_std""" ) )
def __A ( self ):
A__ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
A__ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
A__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def __A ( self ):
pass
@is_flaky()
def __A ( self ):
# Initialize image_processing
A__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
A__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A__ : Union[str, Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def __A ( self ):
# Initialize image_processing
A__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
A__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A__ : Optional[int] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def __A ( self ):
# Initialize image_processing
A__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
A__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A__ : List[Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 64 |
def add(first: int, second: int) -> int:
    # Add two integers using only bitwise operations: XOR adds without carry,
    # AND extracts the carry bits, which are shifted left and re-added until
    # no carry remains. With Python's unbounded integers this terminates for
    # non-negative operands.
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
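A quick trace of the carry-propagation loop above for add(5, 3); each pass moves the carry one bit to the left until it vanishes:
# iteration 1: c = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
# iteration 2: c = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
# iteration 3: c = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
# iteration 4: c = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0 -> loop exits
assert add(5, 3) == 8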
| 64 | 1 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")
    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        # Best of: a 1-day, 7-day or 30-day pass starting at this travel day.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
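Worked example (the LeetCode 983 sample): with 1-, 7- and 30-day passes costing 2, 7 and 15, the optimal plan for the travel days below is a day pass, a week pass covering days 4 through 8, and a final day pass, for a total of 11:
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11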
| 64 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
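Sanity check taken from the problem statement: for the cube sequence u(n) = n^3 the first incorrect terms of the optimum polynomials are 1, 15 and 58, so their sum is 74:
assert solution(lambda var: var**3, 3) == 74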
| 64 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")
    # positional
    parser.add_argument(
        """training_script""",
        type=str,
        help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ),
    )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
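For illustration, launching a hypothetical run_glue.py on 8 cores as "python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased" would spawn run_glue.py's _mp_fn with sys.argv patched to (flags here are assumptions for the example):
expected_argv = ["run_glue.py", "--model_name_or_path", "bert-base-cased", "--tpu_num_cores", "8"]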
| 64 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
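Because of the lru_cache decorator, every intermediate factorial is memoized, so later calls reuse earlier work (the exact CacheInfo counters below assume a fresh interpreter):
assert factorial(5) == 120
assert factorial(6) == 720  # reuses the cached factorial(5) instead of recursing further
print(factorial.cache_info())  # e.g. CacheInfo(hits=1, misses=6, maxsize=128, currsize=6)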
| 64 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
A_ : Tuple = logging.getLogger(__name__)
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__: Optional[str] = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__: Optional[str] = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__: Optional[str] = field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase__: bool = field(
default=__magic_name__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCAmelCase__: str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCAmelCase__: bool = field(
default=__magic_name__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: Optional[str] = field(default=__magic_name__ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCAmelCase__: Optional[str] = field(
default=__magic_name__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCAmelCase__: bool = field(
default=__magic_name__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCAmelCase__: Optional[int] = field(
default=__magic_name__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCAmelCase__: Optional[int] = field(
default=__magic_name__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__: bool = field(
default=__magic_name__ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
UpperCAmelCase__: Optional[int] = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__: Optional[int] = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __A ( self ):
if self.train_file is not None:
A__ : int = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ : Any = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: PreTrainedTokenizerBase
UpperCAmelCase__: Union[bool, str, PaddingStrategy] = True
UpperCAmelCase__: Optional[int] = None
UpperCAmelCase__: Optional[int] = None
def __call__( self , A__ ):
A__ : Dict = """label""" if """label""" in features[0].keys() else """labels"""
A__ : int = [feature.pop(A__ ) for feature in features]
A__ : Tuple = len(A__ )
A__ : Any = len(features[0]["""input_ids"""] )
A__ : str = [
[{k: v[i] for k, v in feature.items()} for i in range(A__ )] for feature in features
]
A__ : Dict = list(chain(*A__ ) )
A__ : Dict = self.tokenizer.pad(
A__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
A__ : List[Any] = {k: v.view(A__ , A__ , -1 ) for k, v in batch.items()}
# Add back labels
A__ : Any = torch.tensor(A__ , dtype=torch.intaa )
return batch
def UpperCamelCase () -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ : Any = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ : Any = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
datasets.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A__ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ : Union[str, Any] = {}
if data_args.train_file is not None:
A__ : List[str] = data_args.train_file
if data_args.validation_file is not None:
A__ : Optional[Any] = data_args.validation_file
A__ : List[Any] = data_args.train_file.split(""".""" )[-1]
A__ : Any = load_dataset(
lowercase_ , data_files=lowercase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ : int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ : Optional[int] = [f"""ending{i}""" for i in range(4 )]
A__ : List[str] = """sent1"""
A__ : List[str] = """sent2"""
if data_args.max_seq_length is None:
A__ : Tuple = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
A__ : Optional[Any] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
A__ : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase_: Dict ):
A__ : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
A__ : str = examples[question_header_name]
A__ : List[str] = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase_ )
]
# Flatten out
A__ : int = list(chain(*lowercase_ ) )
A__ : Any = list(chain(*lowercase_ ) )
# Tokenize
A__ : Optional[Any] = tokenizer(
lowercase_ , lowercase_ , truncation=lowercase_ , max_length=lowercase_ , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
A__ : Dict = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
A__ : int = min(len(lowercase_ ) , data_args.max_train_samples )
A__ : str = train_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
A__ : str = train_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
A__ : Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
A__ : List[Any] = min(len(lowercase_ ) , data_args.max_eval_samples )
A__ : int = eval_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
A__ : Dict = eval_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase_: str ):
A__ , A__ : Union[str, Any] = eval_predictions
A__ : Dict = np.argmax(lowercase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ : Union[str, Any] = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase_ , data_collator=lowercase_ , compute_metrics=lowercase_ , )
# Training
if training_args.do_train:
A__ : str = None
if training_args.resume_from_checkpoint is not None:
A__ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ : List[Any] = last_checkpoint
A__ : Optional[int] = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ : Tuple = train_result.metrics
A__ : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
)
A__ : int = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("""train""" , lowercase_ )
trainer.save_metrics("""train""" , lowercase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A__ : Any = trainer.evaluate()
A__ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_ )
A__ : Union[str, Any] = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
A__ : Union[str, Any] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
def UpperCamelCase (lowercase_: Optional[Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
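The flatten/un-flatten dance in preprocess_function and DataCollatorForMultipleChoice above reduces to a reshape: the num_choices candidate sequences of each example are tokenized as one flat batch and then viewed back as (batch_size, num_choices, seq_len). A toy illustration:
import torch

batch_size, num_choices, seq_len = 2, 4, 8
flat = torch.arange(batch_size * num_choices * seq_len).view(batch_size * num_choices, seq_len)
unflat = flat.view(batch_size, num_choices, -1)  # what the multiple-choice model consumes
assert unflat.shape == (batch_size, num_choices, seq_len)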
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def UpperCamelCase () -> Tuple:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class _a (__magic_name__ ):
'''simple docstring'''
@require_beam
def __A ( self ):
A__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : int = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
import apache_beam as beam
A__ : int = beam.io.parquetio.WriteToParquet
A__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
A__ : Optional[Any] = partial(A__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __A ( self ):
A__ : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 64 | 1 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse all denominations, largest first (assumes ascending input order)
    for denomination in reversed(denominations):
        # Take as many of this denomination as possible
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
    if (
        input('Do you want to enter your denominations ? (y/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
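Example with the default Indian denominations: 987 decomposes greedily as 500 + 4 x 100 + 50 + 20 + 10 + 5 + 2. Note the greedy strategy is only guaranteed optimal for canonical coin systems such as this one:
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]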
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , A__ , )
super().__init__(*A__ , **A__ )
| 64 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
A_ : List[Any] = logging.get_logger(__name__)
A_ : Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : Union[str, Any] = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
A_ : Dict = {'allegro/herbert-base-cased': 514}
A_ : Tuple = {}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase__: Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: Any = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: Optional[int] = HerbertTokenizer
def __init__( self , A__=None , A__=None , A__=None , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__="</s>" , **A__ , ):
super().__init__(
A__ , A__ , tokenizer_file=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , sep_token=A__ , **A__ , )
def __A ( self , A__ , A__ = None ):
A__ : int = [self.cls_token_id]
A__ : Optional[Any] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , A__ , A__ = None , A__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
if token_ids_a is None:
return [1] + ([0] * len(A__ )) + [1]
return [1] + ([0] * len(A__ )) + [1] + ([0] * len(A__ )) + [1]
def __A ( self , A__ , A__ = None ):
A__ : Optional[Any] = [self.sep_token_id]
A__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A__ , A__ = None ):
A__ : List[str] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
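For reference, the helpers above produce the standard single-sequence and pair layouts, <s> A </s> and <s> A </s> B </s>. A small illustration with hypothetical token ids (cls=0, sep=2; real ids depend on the vocabulary):
cls_id, sep_id = 0, 2
ids_a, ids_b = [11, 12], [21]
assert [cls_id] + ids_a + [sep_id] == [0, 11, 12, 2]  # single sequence
assert [cls_id] + ids_a + [sep_id] + ids_b + [sep_id] == [0, 11, 12, 2, 21, 2]  # pair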
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int:
def get_dataset(lowercase_: Optional[int] ):
A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
A__ : Dict = get_dataset(lowercase_ )
A__ : Any = get_dataset(lowercase_ )
A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]:
A__ : List[Any] = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
A__ , A__ : Any = batch
A__ : Any = model(lowercase_ )
A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
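A minimal save/load round trip using the same Accelerator API the tests above exercise; the model and hyperparameters here are illustrative only:
import tempfile
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
with tempfile.TemporaryDirectory() as ckpt_dir:
    accelerator.save_state(ckpt_dir)  # writes model, optimizer and RNG states
    accelerator.load_state(ckpt_dir)  # restores them in place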
| 64 | 1 |
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
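Worked example: 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27:
assert solution(10) == 27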
| 64 |
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i chars of a can be turned into the
    # first j chars of b by capitalising some lowercase letters of a and
    # deleting the remaining lowercase ones.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
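Example instances of the abbreviation problem this DP solves: "daBcd" can reach "ABC" by capitalising 'a' and 'c' and deleting the lowercase 'd's, while "dBcd" cannot supply the leading 'A':
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False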
| 64 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCamelCase () -> Any:
A__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""-m""" , """--pretrained_model_name_or_path""" , type=lowercase_ , default=lowercase_ , required=lowercase_ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
parser.add_argument(
"""-c""" , """--caption""" , type=lowercase_ , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
parser.add_argument(
"""-n""" , """--images_num""" , type=lowercase_ , default=4 , help="""How much images to generate.""" , )
parser.add_argument(
"""-s""" , """--seed""" , type=lowercase_ , default=42 , help="""Seed for random process.""" , )
parser.add_argument(
"""-ci""" , """--cuda_id""" , type=lowercase_ , default=0 , help="""cuda_id.""" , )
A__ : Optional[int] = parser.parse_args()
return args
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int , lowercase_: Union[str, Any] ) -> Tuple:
if not len(lowercase_ ) == rows * cols:
raise ValueError("""The specified number of rows and columns are not correct.""" )
A__ , A__ : Union[str, Any] = imgs[0].size
A__ : Optional[int] = Image.new("""RGB""" , size=(cols * w, rows * h) )
A__ , A__ : Tuple = grid.size
for i, img in enumerate(lowercase_ ):
grid.paste(lowercase_ , box=(i % cols * w, i // cols * h) )
return grid
def generate_images(pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
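# Standalone sketch (illustrative, not part of the original script): `image_grid` only
# needs equally sized PIL images, so the paste arithmetic can be exercised without
# loading any diffusion model.
def _demo_image_grid():
    # Four solid-colour 64x64 tiles arranged 2x2 -> one 128x128 RGB image.
    tiles = [Image.new("RGB", (64, 64), color=c) for c in ("red", "green", "blue", "white")]
    return image_grid(tiles, rows=2, cols=2)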
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = ASTFeatureExtractor
def __A ( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[str] = np.asarray(A__ )
A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
@require_torch
def __A ( self ):
import torch
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Tuple = np.random.rand(100 ).astype(np.float32 )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def __A ( self , A__ ):
from datasets import load_dataset
A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def __A ( self ):
# fmt: off
A__ : Optional[Any] = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Tuple = ASTFeatureExtractor()
A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
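# Minimal end-to-end sketch (illustrative, mirrors the integration test above; needs
# torchaudio for the mel filter banks): one second of silence yields the fixed
# (batch, 1024 frames, 128 mel bins) shape.
def _demo_ast_feature_extraction():
    extractor = ASTFeatureExtractor()
    silence = np.zeros(16000, dtype=np.float32)  # 1 s of 16 kHz audio
    features = extractor(silence, sampling_rate=16000, return_tensors="np").input_values
    assert features.shape == (1, 1024, 128)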
| 64 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
A_ : Union[str, Any] = logging.get_logger(__name__)
def convert_classification(base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization(base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector(base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name , config_path , checkpoint_path , model_dump_path ):
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForXVector""" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
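# Toy sketch (illustrative) of the weight-copy pattern used by the converters above:
# checkpoint tensors are written onto `nn.Module` parameters through `.data`, which
# replaces the values without touching autograd bookkeeping.
def _demo_weight_copy():
    import torch.nn as nn
    layer = nn.Linear(4, 2)
    downstream = {"projector.weight": torch.ones(2, 4), "projector.bias": torch.zeros(2)}
    layer.weight.data = downstream["projector.weight"]
    layer.bias.data = downstream["projector.bias"]
    return layer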
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (ModelMixin , ConfigMixin , ModuleUtilsMixin ):
'''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
super().__init__()
A__ : Union[str, Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
A__ : str = prefix_inner_dim
A__ : Optional[Any] = prefix_hidden_dim
A__ : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ : int = (
nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
        A__ : Tuple = GPT2Config(
vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPT2LMHeadModel(A__ )
def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
A__ : List[str] = self.transformer.transformer.wte(A__ )
A__ : int = self.encode_prefix(A__ )
A__ : int = self.decode_prefix(A__ )
A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __A ( self , A__ , A__ ):
        return torch.zeros(A__ , self.prefix_length , dtype=torch.int64 , device=A__ )
def __A ( self , A__ ):
return self.encode_prefix(A__ )
@torch.no_grad()
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
A__ : Optional[int] = []
A__ : str = []
for feature in features:
A__ : Dict = self.decode_prefix(feature.to(A__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Union[str, Any] = self.generate_beam(
input_embeds=A__ , device=A__ , eos_token_id=A__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : int = torch.stack(A__ )
A__ : List[Any] = torch.stack(A__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
A__ : Any = eos_token_id
A__ : Any = None
A__ : Optional[int] = None
A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Dict = input_embeds
else:
A__ : str = self.transformer.transformer.wte(A__ )
for i in range(A__ ):
A__ : Dict = self.transformer(inputs_embeds=A__ )
A__ : str = outputs.logits
A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Any = logits.softmax(-1 ).log()
if scores is None:
A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : Optional[Any] = next_tokens
else:
A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
else:
A__ : Optional[int] = -float(np.inf )
A__ : List[Any] = 0
A__ : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Dict = scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
A__ : Tuple = next_tokens // scores_sum.shape[1]
A__ : Optional[Any] = seq_lengths[next_tokens_source]
A__ : List[str] = next_tokens % scores_sum.shape[1]
A__ : Optional[int] = next_tokens.unsqueeze(1 )
A__ : int = tokens[next_tokens_source]
A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
A__ : str = generated[next_tokens_source]
A__ : Optional[Any] = scores_sum_average * seq_lengths
A__ : Union[str, Any] = is_stopped[next_tokens_source]
A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
if is_stopped.all():
break
A__ : Dict = scores / seq_lengths
A__ : Dict = scores.argsort(descending=A__ )
# tokens tensors are already padded to max_seq_length
A__ : Union[str, Any] = [tokens[i] for i in order]
A__ : Any = torch.stack(A__ , dim=0 )
A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
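# Small numeric sketch (illustrative) of the beam bookkeeping in the method above:
# `scores[:, None] + logits` broadcasts each beam's running log-prob over the vocabulary,
# and `view(-1).topk(k)` then selects the k best (beam, token) pairs jointly.
def _demo_beam_step():
    scores = torch.tensor([0.0, -1.0])                        # running log-probs of 2 beams
    logits = torch.log_softmax(torch.randn(2, 5), dim=-1)     # fake next-token log-probs
    scores_sum = scores[:, None] + logits                     # (2, 5) candidate scores
    best_scores, flat_idx = scores_sum.view(-1).topk(2, -1)
    beam_idx = flat_idx // scores_sum.shape[1]                # beam each winner extends
    token_idx = flat_idx % scores_sum.shape[1]                # token that extends it
    return best_scores, beam_idx, token_idx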
| 64 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args , take_from: Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ):
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
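# Usage sketch (illustrative; the helper mirrors diffusers' `deprecate`, and the relative
# `from .. import __version__` means it only runs inside that package): fetch a deprecated
# kwarg once, emitting a FutureWarning, then fall back to its value.
def _demo_deprecate(**kwargs):
    scale = UpperCamelCase("scale", "99.0.0", "Use `gamma` instead.", take_from=kwargs)
    return scale
# _demo_deprecate(scale=2.0) would warn and return 2.0.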
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
'''simple docstring'''
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = JsonConfig
def __A ( self ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
A__ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
A__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
A__ : Optional[Any] = data_files
if isinstance(A__ , A__ ):
A__ : List[str] = [files]
A__ : int = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
A__ : Optional[int] = [files]
A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A__ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , A__ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
# We keep only the field we are interested in
A__ : Optional[int] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
A__ : Any = dataset
A__ : Any = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , """rb""" ) as f:
A__ : List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
A__ : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
A__ : Dict = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
try:
while True:
try:
A__ : str = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
A__ : str = set().union(*[row.keys() for row in dataset] )
A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
A__ : int = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1
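# Minimal sketch (illustrative) of the chunked JSON-lines parsing `_generate_tables`
# relies on: pyarrow parses a bytes buffer of newline-delimited JSON with an explicit
# block size, just like the retry loop above.
def _demo_read_json_chunk():
    buf = io.BytesIO(b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n')
    table = paj.read_json(buf, read_options=paj.ReadOptions(block_size=16 << 10))
    return table.to_pydict()  # {'a': [1, 2], 'b': ['x', 'y']}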
| 64 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _a (Dataset ):
'''simple docstring'''
    def __init__( self , params , data ):
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A__ ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
def __A ( self ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __A ( self ):
A__ : List[str] = self.params.max_model_input_size
A__ : Optional[Any] = self.lengths > max_len
logger.info(F"""Splitting {sum(A__ )} too long sequences.""" )
def divide_chunks(A__ , A__ ):
return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )]
A__ : Dict = []
A__ : Optional[Any] = []
if self.params.mlm:
A__ , A__ : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
A__ , A__ : Tuple = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
A__ : List[Any] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
A__ : List[Any] = np.insert(A__ , 0 , A__ )
if sub_s[-1] != sep_id:
A__ : Union[str, Any] = np.insert(A__ , len(A__ ) , A__ )
assert len(A__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A__ )
new_tok_ids.extend(A__ )
new_lengths.extend([len(A__ ) for l in sub_seqs] )
A__ : int = np.array(A__ )
A__ : List[str] = np.array(A__ )
def __A ( self ):
A__ : Any = len(self )
A__ : Tuple = self.lengths > 11
A__ : List[Any] = self.token_ids[indices]
A__ : str = self.lengths[indices]
A__ : str = len(self )
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __A ( self ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
A__ : List[str] = self.params.special_tok_ids["""unk_token"""]
A__ : List[str] = len(self )
A__ : Dict = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
A__ : Optional[int] = (unk_occs / self.lengths) < 0.5
A__ : Union[str, Any] = self.token_ids[indices]
A__ : Optional[int] = self.lengths[indices]
A__ : int = len(self )
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __A ( self ):
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __A ( self , A__ ):
A__ : Tuple = [t[0] for t in batch]
A__ : Any = [t[1] for t in batch]
assert len(A__ ) == len(A__ )
# Max for paddings
A__ : List[str] = max(A__ )
# Pad token ids
if self.params.mlm:
A__ : str = self.params.special_tok_ids["""pad_token"""]
else:
A__ : List[str] = self.params.special_tok_ids["""unk_token"""]
A__ : int = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids]
assert len(tk_ ) == len(A__ )
assert all(len(A__ ) == max_seq_len_ for t in tk_ )
A__ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
A__ : Any = torch.tensor(A__ ) # (bs)
return tk_t, lg_t
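# Toy sketch (illustrative) of the right-padding done in the collate method above: every
# sequence in a batch is padded with a pad id up to the batch's longest length.
def _demo_pad_batch():
    pad_idx = 0
    token_ids = [np.array([5, 6, 7]), np.array([8, 9])]
    max_len = max(len(t) for t in token_ids)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded)  # tensor([[5, 6, 7], [8, 9, 0]])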
| 64 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
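# Illustrative examples (not from the original script) of what each check flags:
#   "DIRECTORY/ReadMe.md" -> upper_files (uppercase characters)
#   "dir/my file.py"      -> space_files (space in the path)
#   "dir/my-file.py"      -> hyphen_files (hyphen in the path)
#   "file.py"             -> nodir_files (no directory separator)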
| 64 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class _a (unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(A__ )
@classmethod
def __A ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def __A ( self ):
A__ : int = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
A__ : Optional[Any] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A__ , getattr(A__ , A__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A__ , repo_id="""test-config""" , push_to_hub=A__ , use_auth_token=self._token )
A__ : str = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A__ , getattr(A__ , A__ ) )
def __A ( self ):
A__ : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
A__ : Optional[Any] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A__ , getattr(A__ , A__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A__ , repo_id="""valid_org/test-config-org""" , push_to_hub=A__ , use_auth_token=self._token )
A__ : str = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A__ , getattr(A__ , A__ ) )
def __A ( self ):
CustomConfig.register_for_auto_class()
A__ : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
A__ : Optional[int] = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=A__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 42 )
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
        A__ : Any = GPT2Config()
# attempt to modify each of int/float/bool/str config records and verify they were updated
A__ : Optional[int] = c.n_embd + 1 # int
A__ : List[str] = c.resid_pdrop + 1.0 # float
A__ : Union[str, Any] = not c.scale_attn_weights # bool
A__ : Optional[Any] = c.summary_type + """foo""" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(A__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(A__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(A__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(A__ , c.summary_type , """mismatch for key: summary_type""" )
def __A ( self ):
A__ : Optional[Any] = PretrainedConfig()
A__ : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
A__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
A__ : int = [key for key, value in config_common_kwargs.items() if value == getattr(A__ , A__ )]
if len(A__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F""" {', '.join(A__ )}.""" )
def __A ( self ):
with self.assertRaises(A__ ):
# config is in subfolder, the following should not work without specifying the subfolder
A__ : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
A__ : int = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(A__ )
def __A ( self ):
# A mock response for an HTTP head request to emulate server down
A__ : Tuple = mock.Mock()
A__ : Tuple = 500
A__ : Tuple = {}
A__ : Dict = HTTPError
A__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
A__ : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A__ ) as mock_head:
A__ : int = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self ):
# This test is for deprecated behavior and can be removed in v5
A__ : List[Any] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def __A ( self ):
A__ : Any = AutoConfig.from_pretrained("""bert-base-cased""" )
A__ : str = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(A__ )
A__ : List[str] = 2
json.dump(configuration.to_dict() , open(os.path.join(A__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
A__ : Dict = ["""config.42.0.0.json"""]
A__ : Any = 768
configuration.save_pretrained(A__ )
shutil.move(os.path.join(A__ , """config.4.0.0.json""" ) , os.path.join(A__ , """config.42.0.0.json""" ) )
A__ : List[Any] = AutoConfig.from_pretrained(A__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __A ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
A__ : List[Any] = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
A__ : Union[str, Any] = """v4.0.0"""
A__ , A__ : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
A__ , return_unused_kwargs=A__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(A__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
A__ : Tuple = """v3.0.0"""
A__ : str = old_transformers.models.auto.AutoConfig.from_pretrained(A__ )
self.assertEqual(old_configuration.hidden_size , 768 )
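# Minimal sketch (illustrative) of the `update_from_string` mechanic exercised above:
# comma-separated `key=value` pairs are parsed and coerced to the type of the existing
# attribute before being written onto the config.
def _demo_update_from_string():
    config = GPT2Config()
    config.update_from_string("n_embd=512,resid_pdrop=0.2,scale_attn_weights=False")
    return config.n_embd, config.resid_pdrop, config.scale_attn_weights  # (512, 0.2, False)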
| 64 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args , take_from: Optional[Union[Dict, Any]] = None , standard_warn=True , stacklevel=2 ):
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 64 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None
UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'
REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("""\"test requires Fairseq\"""" )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_if_metric_requires_transformers(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("""\"test requires transformers\"""" )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_on_windows(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("""\"test not supported on Windows\"""" )
        else:
            test_case(self , metric_name )
    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest(parameterized.TestCase ):
    '''simple docstring'''
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def __A ( self , A__ ):
A__ : int = """[...]"""
A__ : Union[str, Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , A__ ) ).module_path )
A__ : Optional[int] = datasets.load.import_main_class(metric_module.__name__ , dataset=A__ )
# check parameters
A__ : int = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(A__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
A__ : Optional[Any] = doctest.testmod(A__ , verbose=A__ , raise_on_error=A__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __A ( self , A__ ):
A__ : str = """[...]"""
A__ : List[str] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , A__ ) ).module_path )
# run doctest
with self.use_local_metrics():
A__ : int = doctest.testmod(A__ , verbose=A__ , raise_on_error=A__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __A ( self , A__ , A__ ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](A__ ):
yield
else:
yield
@contextmanager
def __A ( self ):
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join("""metrics""" , metric_name ) , *args , **kwargs )
with patch("""datasets.load_metric""" ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
yield
    @classmethod
    def register_intensive_calls_patcher( cls , metric_name ):
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def UpperCamelCase (lowercase_: Optional[Any] ) -> int:
    import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
    class MockedPredictor(Predictor ):
'''simple docstring'''
def __A ( self , A__ ):
assert len(input_dict["""input_ids"""] ) == 2
            return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def UpperCamelCase (lowercase_: List[Any] ) -> List[Any]:
import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def UpperCamelCase (lowercase_: str ) -> Optional[Any]:
def load_from_checkpoint(lowercase_: List[Any] ):
class _a :
'''simple docstring'''
def __A ( self , A__ , *A__ , **A__ ):
assert len(A__ ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
return Model()
    # mock load_from_checkpoint, which is supposed to download a model
with patch("""comet.download_model""" ) as mock_download_model:
        mock_download_model.return_value = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
    wrong_scheme = """ERROR"""
    error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
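# Standalone sketch (illustrative) of the patching pattern the registered patchers use:
# `unittest.mock.patch` swaps a callable inside its module so the doctests above never
# hit the network or run a heavy model.
def _demo_patch_pattern():
    with patch("os.cpu_count") as mock_cpu_count:
        mock_cpu_count.return_value = 1
        assert os.cpu_count() == 1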
| 64 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple , flax_tensor ):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer , checkpoint_info , switch_checkpoint_path ):
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block , save_path ):
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("""/""" , """.""" )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly(switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name: str = WEIGHTS_NAME ):
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        checkpoint_info = flatten_dict(checkpoint_info , sep="""/""" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split("""/""" ) ) , raw_weights )
        key = """/""".join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            """.bin""" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    tokenizer = T5Tokenizer.from_pretrained("""t5-small""" )
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text , return_tensors="""pt""" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
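# Quick sketch (illustrative) of the shard-naming arithmetic used above: shards are first
# written under provisional "-of-???" names and renamed once the final count is known.
def _demo_shard_names(num_shards: int = 3):
    temp = [WEIGHTS_NAME.replace(".bin", f"-{i+1:05d}-of-???.bin") for i in range(num_shards)]
    final = [WEIGHTS_NAME.replace(".bin", f"-{i+1:05d}-of-{num_shards:05d}.bin") for i in range(num_shards)]
    return temp, final  # e.g. 'pytorch_model-00001-of-00003.bin'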
| 64 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _a (TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Any = XGLMTokenizer(A__ , keep_accents=A__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
A__ : Tuple = """<pad>"""
A__ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__ ) , A__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__ ) , A__ )
def __A ( self ):
A__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(A__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
A__ : Optional[int] = XGLMTokenizer(A__ , keep_accents=A__ )
A__ : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A__ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A__ : List[str] = tokenizer.convert_tokens_to_ids(A__ )
self.assertListEqual(
A__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ : List[Any] = tokenizer.convert_ids_to_tokens(A__ )
self.assertListEqual(
A__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A__ , f.name )
A__ : List[str] = XGLMTokenizer(f.name , keep_accents=A__ )
A__ : List[Any] = pickle.dumps(A__ )
pickle.loads(A__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
A__ : List[str] = self.get_tokenizer()
A__ : Dict = self.get_rust_tokenizer()
A__ : int = """I was born in 92000, and this is falsé."""
A__ : List[Any] = tokenizer.tokenize(A__ )
A__ : str = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
A__ : Union[str, Any] = tokenizer.encode(A__ , add_special_tokens=A__ )
A__ : List[str] = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
A__ : int = self.get_rust_tokenizer()
A__ : Optional[int] = tokenizer.encode(A__ )
A__ : List[str] = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
@slow
def __A ( self ):
A__ : Optional[int] = """Hello World!"""
A__ : Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(A__ , self.big_tokenizer.encode(A__ ) )
@slow
def __A ( self ):
A__ : Dict = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
A__ : str = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(A__ , self.big_tokenizer.encode(A__ ) )
@slow
def __A ( self ):
# fmt: off
A__ : Tuple = {
"""input_ids""": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name="""facebook/xglm-564M""" , padding=A__ , )
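# Context for the fairseq_offset arithmetic exercised in the tests above: the
# tokenizer shifts every raw SentencePiece id by a fixed offset so that the
# control tokens (<s>, <pad>, </s>, <unk>) can occupy the lowest ids. A
# minimal stand-alone sketch; the offset value here is an assumption for the
# demo, not XGLM's actual offset.
_DEMO_FAIRSEQ_OFFSET = 1

def _spm_to_model_id(spm_id: int, offset: int = _DEMO_FAIRSEQ_OFFSET) -> int:
    # model vocabulary id = raw SentencePiece id + fixed offset
    return spm_id + offset

assert [_spm_to_model_id(i) for i in (285, 46, 10)] == [286, 47, 11]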
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
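# For context, a minimal stand-alone sketch of the lazy-import pattern that
# _LazyModule provides, using the module-level __getattr__ hook from PEP 562.
# The mapping below is illustrative only; _LazyModule itself derives it from
# _import_structure rather than a hand-written dict.
import importlib

_LAZY_ATTRS = {"BartphoTokenizer": ".tokenization_bartpho"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")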
| 64 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
A_ : Dict = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
A_ : Optional[Any] = logging.getLogger()
def UpperCamelCase () -> Optional[Any]:
A__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
A__ : Tuple = parser.parse_args()
return args.f
def UpperCamelCase (lowercase_: Optional[int] , lowercase_: int="eval" ) -> Dict:
A__ : List[Any] = os.path.join(lowercase_ , f"""{split}_results.json""" )
if os.path.exists(lowercase_ ):
with open(lowercase_ , """r""" ) as f:
return json.load(lowercase_ )
raise ValueError(f"""can't find {path}""" )
A_ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self ):
A__ : Tuple = self.get_auto_remove_tmp_dir()
A__ : str = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A__ , """argv""" , A__ ):
run_flax_glue.main()
A__ : Tuple = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
@slow
def __A ( self ):
A__ : List[str] = self.get_auto_remove_tmp_dir()
A__ : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A__ , """argv""" , A__ ):
run_clm_flax.main()
A__ : int = get_results(A__ )
self.assertLess(result["""eval_perplexity"""] , 100 )
@slow
def __A ( self ):
A__ : Tuple = self.get_auto_remove_tmp_dir()
A__ : Union[str, Any] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A__ , """argv""" , A__ ):
run_summarization_flax.main()
A__ : List[Any] = get_results(A__ , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def __A ( self ):
A__ : int = self.get_auto_remove_tmp_dir()
A__ : int = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A__ , """argv""" , A__ ):
run_mlm_flax.main()
A__ : str = get_results(A__ )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def __A ( self ):
A__ : List[str] = self.get_auto_remove_tmp_dir()
A__ : Union[str, Any] = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A__ , """argv""" , A__ ):
run_ta_mlm_flax.main()
A__ : str = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.4_2 )
@slow
def __A ( self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
A__ : Any = 7 if get_gpu_count() > 1 else 2
A__ : List[Any] = self.get_auto_remove_tmp_dir()
A__ : int = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A__ , """argv""" , A__ ):
run_flax_ner.main()
A__ : Union[str, Any] = get_results(A__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def __A ( self ):
A__ : Optional[Any] = self.get_auto_remove_tmp_dir()
A__ : Tuple = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A__ , """argv""" , A__ ):
run_qa.main()
A__ : Tuple = get_results(A__ )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 64 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A_ : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCamelCase (lowercase_: Optional[Any] ) -> Optional[int]:
A__ : List[Any] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
A_ : Any = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def UpperCamelCase (lowercase_: str ) -> Any:
A__ : Dict = list(s_dict.keys() )
for key in keys:
A__ : List[str] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
A__ : List[Any] = new_key.replace(lowercase_ , lowercase_ )
print(f"""{key} -> {new_key}""" )
A__ : Tuple = s_dict.pop(lowercase_ )
return s_dict
def UpperCamelCase (lowercase_: Tuple ) -> Optional[int]:
A__ , A__ : Any = emb.weight.shape
A__ : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
A__ : Union[str, Any] = emb.weight.data
return lin_layer
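# Illustration of the weight tying performed by the helper above, as a
# self-contained toy; the shapes are assumptions for the demo, not Whisper's
# real dimensions.
def _tied_head_demo():
    toy_emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
    toy_head = nn.Linear(16, 100, bias=False)
    toy_head.weight = toy_emb.weight  # tied: logits = hidden @ emb.weight.T
    assert toy_head(torch.randn(2, 16)).shape == (2, 100)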
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bytes:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
A__ : Tuple = os.path.basename(lowercase_ )
A__ : int = url.split("""/""" )[-2]
A__ : Dict = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ) and not os.path.isfile(lowercase_ ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowercase_ ):
A__ : Optional[Any] = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowercase_ ) as source, open(lowercase_ , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowercase_ , unit_divisor=1024 ) as loop:
while True:
A__ : Any = source.read(8192 )
if not buffer:
break
output.write(lowercase_ )
loop.update(len(lowercase_ ) )
A__ : Dict = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
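# The helper above follows a common verify-then-fetch pattern. A compact
# stand-alone sketch of the same idea; the function name is an assumption for
# illustration and is not used by the conversion script itself.
from urllib.request import urlretrieve

def fetch_verified(url: str, dest: str, expected_sha256: str) -> bytes:
    urlretrieve(url, dest)
    data = open(dest, "rb").read()
    if hashlib.sha256(data).hexdigest() != expected_sha256:
        raise RuntimeError("SHA256 mismatch; the download may be corrupted")
    return data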
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Tuple ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
A__ : Tuple = _download(_MODELS[checkpoint_path] )
else:
A__ : Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
A__ : str = original_checkpoint["""dims"""]
A__ : List[Any] = original_checkpoint["""model_state_dict"""]
A__ : Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(lowercase_ )
rename_keys(lowercase_ )
A__ : List[str] = True
A__ : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
A__ : List[Any] = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
A__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
A__ , A__ : List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
A__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A__ : str = proj_out_weights
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A_ : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class _a :
'''simple docstring'''
UpperCAmelCase__: str
UpperCAmelCase__: str = None
@staticmethod
def __A ( ):
raise NotImplementedError
def __A ( self , A__ , A__ , A__ , **A__ ):
raise NotImplementedError
def __A ( self , A__ ):
raise NotImplementedError
def __A ( self ):
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def __A ( cls ):
return F"""`pip install {cls.pip_package or cls.name}`"""
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Tuple = '''optuna'''
@staticmethod
def __A ( ):
return is_optuna_available()
def __A ( self , A__ , A__ , A__ , **A__ ):
return run_hp_search_optuna(A__ , A__ , A__ , **A__ )
def __A ( self , A__ ):
return default_hp_space_optuna(A__ )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = '''ray'''
UpperCAmelCase__: Any = '''\'ray[tune]\''''
@staticmethod
def __A ( ):
return is_ray_available()
def __A ( self , A__ , A__ , A__ , **A__ ):
return run_hp_search_ray(A__ , A__ , A__ , **A__ )
def __A ( self , A__ ):
return default_hp_space_ray(A__ )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Tuple = '''sigopt'''
@staticmethod
def __A ( ):
return is_sigopt_available()
def __A ( self , A__ , A__ , A__ , **A__ ):
return run_hp_search_sigopt(A__ , A__ , A__ , **A__ )
def __A ( self , A__ ):
return default_hp_space_sigopt(A__ )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = '''wandb'''
@staticmethod
def __A ( ):
return is_wandb_available()
def __A ( self , A__ , A__ , A__ , **A__ ):
return run_hp_search_wandb(A__ , A__ , A__ , **A__ )
def __A ( self , A__ ):
return default_hp_space_wandb(A__ )
A_ : List[str] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCamelCase () -> str:
A__ : Union[str, Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowercase_ ) > 0:
A__ : Optional[Any] = available_backends[0].name
if len(lowercase_ ) > 1:
logger.info(
f"""{len(lowercase_ )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
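# The registry above is capability-based dispatch: collect every backend
# class, keep the available ones, and default to the first. A compact generic
# sketch of the same pattern (all names below are illustrative):
class _DemoBackend:
    name = "base"

    @staticmethod
    def is_available() -> bool:
        return False

class _DemoAlwaysOn(_DemoBackend):
    name = "always_on"

    @staticmethod
    def is_available() -> bool:
        return True

_DEMO_REGISTRY = {cls.name: cls for cls in (_DemoBackend, _DemoAlwaysOn)}

def _demo_pick_default() -> str:
    usable = [b for b in _DEMO_REGISTRY.values() if b.is_available()]
    if not usable:
        raise RuntimeError("no backend available")
    return usable[0].name

assert _demo_pick_default() == "always_on"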
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
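# Condensed inference recipe matching the slow tests above; it needs a CUDA
# device and the damo-vilab weights, so it is only defined here, not run.
def _text_to_video_demo(prompt="Spiderman is surfing", steps=25):
    pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    pipe = pipe.to("cuda")
    return pipe(prompt, num_inference_steps=steps, output_type="pt").frames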
| 64 | 1 |
def UpperCamelCase (lowercase_: int ) -> list:
A__ : str = int(lowercase_ )
if n_element < 1:
A__ : Optional[int] = ValueError("""n_element should be a positive number""" )
raise my_error
A__ : int = [1]
A__ , A__ , A__ : str = (0, 0, 0)
A__ : Union[str, Any] = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
A_ : Optional[int] = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
A_ : str = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
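# For comparison, a heap-based generator that yields the same sequence as the
# three-pointer implementation above, trading a little memory for simpler
# bookkeeping.
import heapq

def hamming_heap(n_element: int) -> list:
    heap, seen, out = [1], {1}, []
    while len(out) < n_element:
        value = heapq.heappop(heap)
        out.append(value)
        for factor in (2, 3, 5):
            if value * factor not in seen:
                seen.add(value * factor)
                heapq.heappush(heap, value * factor)
    return out

assert hamming_heap(8) == [1, 2, 3, 4, 5, 6, 8, 9]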
| 64 |
def UpperCamelCase (lowercase_: int ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("""Input value must be an 'int' type""" )
A__ : int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
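# Sanity check: for non-negative integers the loop above computes exactly
# int.bit_length(), i.e. the 1-based index of the most significant set bit
# (and 0 for zero).
for _sample in (0, 1, 2, 5, 17, 1 << 20):
    assert UpperCamelCase(_sample) == _sample.bit_length()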
| 64 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
A_ : Any = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
A_ : List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
A_ : Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 64 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: int , lowercase_: int ) -> np.ndarray:
A__ : Any = cva.getAffineTransform(lowercase_ , lowercase_ )
return cva.warpAffine(lowercase_ , lowercase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
A_ : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A_ , A_ : Optional[Any] = gray_img.shape
# set different points to rotate image
A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
A_ : Dict = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A_ : Union[str, Any] = plt.figure(1)
A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
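# cva.getAffineTransform solves for the 2x3 matrix M such that M @ [x, y, 1]^T
# maps each source point to its destination. A numpy-only equivalent of that
# solve, as a sketch under the same three-point setup used above:
def affine_from_points(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    ones = np.ones((3, 1), np.float64)
    lhs = np.hstack([src.astype(np.float64), ones])  # rows of [x, y, 1]
    # Solve lhs @ M.T = dst for the 2x3 matrix M.
    return np.linalg.solve(lhs, dst.astype(np.float64)).T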
| 64 | 1 |
def UpperCamelCase (lowercase_: int = 1000000 ) -> int:
A__ : Optional[int] = set(range(3 , lowercase_ , 2 ) )
primes.add(2 )
for p in range(3 , lowercase_ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowercase_ , lowercase_ ) ) )
A__ : str = [float(lowercase_ ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowercase_ , limit + 1 , lowercase_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
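# Cross-check of the totient sieve above on a small limit: phi(n) counts the
# integers in [1, n - 1] coprime to n, and the answer sums phi(n) for
# 2 <= n <= limit (the number of reduced proper fractions).
from math import gcd

def _phi_sum_bruteforce(limit: int) -> int:
    return sum(
        sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, limit + 1)
    )

assert _phi_sum_bruteforce(8) == 1 + 2 + 2 + 4 + 2 + 6 + 4  # phi(2) .. phi(8)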
| 64 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
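# Smallest end-to-end use of the benchmark utilities exercised above; the
# keyword arguments mirror the tests, and calling this downloads the tiny
# model, so it is wrapped in a helper rather than run at import time.
def _benchmark_demo():
    bench_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    return PyTorchBenchmark(bench_args).run()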
| 64 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase (lowercase_: Dict , lowercase_: Union[str, Any] ) -> Union[str, Any]:
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[int] , lowercase_: Tuple ) -> Optional[int]:
A__ : Dict = tmp_path / """cache"""
A__ : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ : Union[str, Any] = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCamelCase (lowercase_: int , lowercase_: Optional[int] , lowercase_: List[str] ) -> int:
A__ : Union[str, Any] = tmp_path / """cache"""
A__ : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : Any = features.copy() if features else default_expected_features
A__ : List[Any] = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ : List[Any] = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def UpperCamelCase (lowercase_: str , lowercase_: List[Any] , lowercase_: List[str] ) -> Dict:
A__ : Optional[int] = tmp_path / """cache"""
A__ : Optional[int] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
A__ : Optional[int] = features.copy() if features else default_expected_features
A__ : int = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ : Union[str, Any] = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def UpperCamelCase (lowercase_: int , lowercase_: Tuple ) -> int:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A__ : str = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
A__ : Optional[int] = features.copy()
A__ : Optional[int] = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ : Union[str, Any] = tmp_path / """cache"""
A__ : Dict = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (lowercase_: Dict , lowercase_: Dict , lowercase_: Optional[Any] ) -> Optional[Any]:
A__ : Union[str, Any] = tmp_path / """cache"""
A__ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : List[str] = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCamelCase (lowercase_: str , lowercase_: Union[str, Any] , lowercase_: List[str] ) -> List[Any]:
if issubclass(lowercase_ , lowercase_ ):
A__ : Optional[int] = jsonl_path
elif issubclass(lowercase_ , lowercase_ ):
A__ : Tuple = [jsonl_path]
A__ : int = tmp_path / """cache"""
A__ : Tuple = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : Tuple = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: int , lowercase_: Dict , lowercase_: Any=("train",) ) -> Optional[int]:
assert isinstance(lowercase_ , lowercase_ )
for split in splits:
A__ : Optional[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: Any , lowercase_: int ) -> Dict:
A__ : int = tmp_path / """cache"""
A__ : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ : List[str] = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCamelCase (lowercase_: str , lowercase_: Dict , lowercase_: Optional[int] ) -> int:
A__ : List[Any] = tmp_path / """cache"""
A__ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : str = features.copy() if features else default_expected_features
A__ : Optional[int] = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ : str = JsonDatasetReader({"""train""": jsonl_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Any , lowercase_: int ) -> List[str]:
if split:
A__ : List[Any] = {split: jsonl_path}
else:
A__ : List[str] = """train"""
A__ : Optional[Any] = {"""train""": jsonl_path, """test""": jsonl_path}
A__ : Any = tmp_path / """cache"""
A__ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A__ : List[Any] = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
return json.load(lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] ) -> Optional[int]:
return [json.loads(lowercase_ ) for line in buffer]
class _a :
'''simple docstring'''
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __A ( self , A__ , A__ , A__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ ).write()
buffer.seek(0 )
A__ : Union[str, Any] = load_json_function(A__ )
assert isinstance(A__ , A__ )
assert isinstance(exported_content[0] , A__ )
assert len(A__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __A ( self , A__ , A__ , A__ , A__ , A__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ ).write()
buffer.seek(0 )
A__ : Any = load_json(A__ )
assert isinstance(A__ , A__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __A ( self , A__ , A__ , A__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , num_proc=2 ).write()
buffer.seek(0 )
A__ : List[str] = load_json_function(A__ )
assert isinstance(A__ , A__ )
assert isinstance(exported_content[0] , A__ )
assert len(A__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __A ( self , A__ , A__ , A__ , A__ , A__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ , num_proc=2 ).write()
buffer.seek(0 )
A__ : List[Any] = load_json(A__ )
assert isinstance(A__ , A__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A__ ) == 10
def __A ( self , A__ ):
with pytest.raises(A__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def __A ( self , A__ , A__ , A__ , A__ , A__ ):
A__ : Dict = tmp_path_factory.mktemp("""data""" ) / F"""test.json.{extension}"""
A__ : Optional[int] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(A__ , A__ , compression=A__ ).write()
with fsspec.open(A__ , """rb""" , compression="""infer""" ) as f:
A__ : Dict = f.read()
with fsspec.open(A__ , """rb""" , compression="""infer""" ) as f:
A__ : Optional[Any] = f.read()
assert exported_content == original_content
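# A minimal round-trip through the reader/writer pair exercised above, kept
# self-contained; it writes a two-row dataset to JSON Lines and reads it back.
import os
import tempfile

def _json_roundtrip_demo():
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "demo.jsonl")
        ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
        ds.to_json(path, lines=True)  # JsonDatasetWriter under the hood
        assert Dataset.from_json(path).column_names == ds.column_names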
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
A__ : List[Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int ) -> List[str]:
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
A__ : Tuple = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self , A__ , A__ , A__ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , A__ , A__ , A__ )
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
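# Usage sketch for the IGNORE_RESULT flag registered above: once registered,
# a doctest can opt out of output comparison on a single line. Illustrative
# example only; nothing in this conftest calls it.
def _ignore_result_example():
    """
    >>> print("output that varies from run to run")  # doctest: +IGNORE_RESULT
    """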
| 64 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase__: int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCAmelCase__: Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__: Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
UpperCAmelCase__: Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self ):
torch.manual_seed(0 )
A__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
A__ : List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
A__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A__ : Tuple = CLIPTextModel(A__ )
A__ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Tuple = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Union[str, Any] = torch.manual_seed(A__ )
else:
A__ : Optional[Any] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : Dict = 2
A__ : Tuple = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , )
A__ : List[str] = floats_tensor(control_image.shape , rng=random.Random(A__ ) ).to(A__ )
A__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ : str = Image.fromarray(np.uinta(A__ ) ).convert("""RGB""" ).resize((64, 64) )
A__ : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __A ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Dict = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase__: str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCAmelCase__: str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__: Tuple = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __A ( self ):
torch.manual_seed(0 )
A__ : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(m ):
    # give the conv weights a non-trivial init so the two ControlNets differ
    if isinstance(m , torch.nn.Conv2d ):
        torch.nn.init.normal_(m.weight )
        m.bias.data.fill_(1.0 )
controlnet_1 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet_1.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
controlnet_2 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet_2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A__ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A__ : Any = CLIPTextModel(A__ )
A__ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
controlnet = MultiControlNetModel([controlnet_1, controlnet_2] )
A__ : Any = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : List[Any] = torch.manual_seed(A__ )
else:
A__ : List[Any] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = 2
A__ : Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=A__ , device=torch.device(A__ ) , ),
]
A__ : List[Any] = floats_tensor(control_image[0].shape , rng=random.Random(A__ ) ).to(A__ )
A__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ : Union[str, Any] = Image.fromarray(np.uint8(A__ ) ).convert("""RGB""" ).resize((64, 64) )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __A ( self ):
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe.to(torch_device )
    scale = 10.0
    steps = 4
    inputs = self.get_dummy_inputs(torch_device )
    inputs["""num_inference_steps"""] = steps
    inputs["""controlnet_conditioning_scale"""] = scale
    output_1 = pipe(**inputs )[0]
    inputs = self.get_dummy_inputs(torch_device )
    inputs["""num_inference_steps"""] = steps
    inputs["""controlnet_conditioning_scale"""] = scale
    output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
    inputs = self.get_dummy_inputs(torch_device )
    inputs["""num_inference_steps"""] = steps
    inputs["""controlnet_conditioning_scale"""] = scale
    output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
    inputs = self.get_dummy_inputs(torch_device )
    inputs["""num_inference_steps"""] = steps
    inputs["""controlnet_conditioning_scale"""] = scale
    output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
    # make sure that all outputs are different
    assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
    assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
    assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
def __A ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def __A ( self ):
A__ : Optional[Any] = self.get_dummy_components()
A__ : Optional[int] = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(A__ )
except NotImplementedError:
pass
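# --- Added illustrative sketch (not part of the original tests) ---
# In the guidance-switch test above, control_guidance_start/end bound the
# fraction of denoising steps during which each ControlNet is applied. A
# simplified sketch of that schedule (not the exact diffusers bookkeeping):
def _controlnet_active(step: int , num_steps: int , start: float , end: float ) -> bool:
    return start <= step / num_steps <= end
assert _controlnet_active(1 , 4 , 0.1 , 0.2 ) is False  # 1/4 = 0.25 falls outside [0.1, 0.2]
assert _controlnet_active(0 , 4 , 0.0 , 1.0 ) is True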
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Dict = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
A__ : Union[str, Any] = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=A__ , controlnet=A__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A__ )
A__ : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : List[Any] = """evil space-punk bird"""
A__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
A__ : Optional[int] = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
A__ : List[str] = pipe(
A__ , A__ , control_image=A__ , generator=A__ , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
A__ : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9e-2
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
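# --- Added note (not part of the original test file) ---
# The slice comparison above is the standard KV-cache consistency check:
# decoding [input_ids; next_tokens] in one pass must match decoding the new
# tokens incrementally with past_key_values. A standalone sketch of the same
# assertion, assuming two logits tensors that share their trailing axes:
def _assert_cache_consistent(logits_full , logits_cached , rtol=1e-3 ):
    import tensorflow as tf
    tf.debugging.assert_near(logits_full[:, -logits_cached.shape[1] :, :] , logits_cached , rtol=rtol )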
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
| 64 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : List[Any] = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[str] = '''biogpt'''
def __init__( self , A__=4_2384 , A__=1024 , A__=24 , A__=16 , A__=4096 , A__="gelu" , A__=0.1 , A__=0.1 , A__=1024 , A__=0.0_2 , A__=1e-12 , A__=True , A__=True , A__=0.0 , A__=0.0 , A__=1 , A__=0 , A__=2 , **A__ , ):
A__ : Optional[int] = vocab_size
A__ : List[str] = max_position_embeddings
A__ : Optional[int] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Dict = hidden_act
A__ : List[Any] = hidden_dropout_prob
A__ : Union[str, Any] = attention_probs_dropout_prob
A__ : Union[str, Any] = initializer_range
A__ : Optional[int] = layer_norm_eps
A__ : Tuple = scale_embedding
A__ : Tuple = use_cache
A__ : Dict = layerdrop
A__ : Optional[Any] = activation_dropout
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
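# --- Added usage sketch (not part of the original file) ---
# The defaults mirror the released microsoft/biogpt checkpoint (42384-token
# vocab, 24 layers of width 1024 with 16 attention heads):
#     config = BioGptConfig()  # canonical transformers class name, assumed here
#     assert config.num_hidden_layers == 24 and config.hidden_size == 1024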
| 64 |
class EditDistance:
    '''simple docstring'''

    def __init__( self ):
        self.word1 = """"""
        self.word2 = """"""
        self.dp = []

    def __min_dist_top_down_dp( self , m , n ):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , word1 , word2 ):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2 ) )] for _ in range(len(word1 ) )]
        return self.__min_dist_top_down_dp(len(word1 ) - 1 , len(word2 ) - 1 )

    def min_dist_bottom_up( self , word1 , word2 ):
        self.word1 = word1
        self.word2 = word2
        m = len(word1 )
        n = len(word2 )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
if __name__ == "__main__":
A_ : Union[str, Any] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A_ : int = input('Enter the first string: ').strip()
A_ : List[str] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
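# --- Added worked example (not part of the original snippet) ---
# "kitten" -> "sitting" needs 3 edits (substitute k->s, substitute e->i, insert g):
#     EditDistance().min_dist_bottom_up("kitten", "sitting")  # -> 3
#     EditDistance().min_dist_top_down("kitten", "sitting")   # -> 3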
| 64 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Dict = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: Optional[Any] ) -> str:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def UpperCamelCase (lowercase_: Any ) -> Any:
from diffusers.utils.testing_utils import pytest_terminal_summary_main
A__ : List[str] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
| 64 |
def add (first: int , second: int ) -> int:
    # XOR sums the bits without carries; AND finds the carry bits, which are
    # shifted left one place and re-added until no carries remain
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
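# --- Added trace (not part of the original snippet) ---
# add(5, 9):
#     0101 ^ 1001 = 1100, carry (0101 & 1001) << 1 = 0010
#     1100 ^ 0010 = 1110, carry (1100 & 0010) << 1 = 0000 -> loop ends, result 14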
| 64 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (lowercase_: List[str] , lowercase_: str ) -> Optional[Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : Optional[int] = torch.permute(lowercase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase_ ):
# linear layer
A__ : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A__ : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def UpperCamelCase (lowercase_: Tuple , lowercase_: Optional[int] , lowercase_: str ) -> Union[str, Any]:
if "metadata" in layer:
A__ : Tuple = layer.split("""metadata""" )
A__ : Optional[Any] = """""".join(split_layer[0] )[:-1]
A__ : Optional[Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
A__ : str = layer.split("""kvstore""" )
A__ : int = """""".join(split_layer[0] )[:-1]
A__ : Optional[int] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
A__ : Any = layer.split("""/""" )
A__ : int = """/""".join(split_layer[:-1] )
A__ : str = (split_layer[-1],)
if "kvstore/path" in layer:
A__ : Dict = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
A__ : Optional[int] = """file"""
else:
A__ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCamelCase (lowercase_: str , lowercase_: List[Any] ) -> int:
A__ : int = rename_keys(lowercase_ )
A__ : Any = {}
for k, v in current_block.items():
A__ : Dict = v
A__ : str = new_current_block
torch.save(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
A__ : List[Any] = []
A__ : int = {}
A__ : List[str] = 0
A__ : Any = 0
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
A__ : Any = {}
for layer in checkpoint_info.keys():
A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
lowercase_ , lowercase_ , lowercase_ )
if curr_real_layer_name in all_layers:
A__ : Optional[int] = content
else:
A__ : List[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A__ : List[Any] = torch.tensor(lowercase_ )
A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
A__ : Any = """/""".join(lowercase_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A__ : List[Any] = os.path.join(
lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A__ : Any = {}
A__ : str = 0
A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A__ : str = {}
A__ : Any = {}
for idx, shard in enumerate(lowercase_ ):
A__ : Any = weights_name.replace(
""".bin""" , f"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
A__ : str = shard
for key in shard:
A__ : Any = shard_file
# Add the metadata
A__ : Tuple = {"""total_size""": total_size}
A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
f.write(lowercase_ )
return metadata, index
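# --- Added illustrative sketch (not part of the original script) ---
# The loop above starts a new shard whenever the running byte count would
# exceed max_shard_size; per-tensor sizes are just numel * bytes-per-element,
# which is what dtype_byte_size() provides. A hypothetical standalone version:
def _tensor_nbytes(numel: int , bits_per_element: int ) -> int:
    return numel * bits_per_element // 8
assert _tensor_nbytes(4096 * 4096 , 16 ) == 33_554_432  # a (4096, 4096) bfloat16 weight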
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCamelCase () -> int:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
A__ : str = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
A__ : str = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
A__ : Tuple = T5Tokenizer.from_pretrained("""t5-small""" )
A__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
A__ : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
A__ : Tuple = model.generate(lowercase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 64 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve (matrix: Matrix , vector: Matrix ) -> Matrix:
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    # build the augmented matrix [A | b]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute entry in this column
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate (y_list: list[int] ) -> Callable[[int], int]:
    size: int = len(y_list )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    # Vandermonde system: one row per data point (x_val + 1, y_val)
    for x_val, y_val in enumerate(y_list ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function (variable: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution (func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
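# --- Added worked check (not part of the original solution) ---
# Sanity check from the Project Euler 101 statement: for the cubic u(n) = n**3
# the optimum polynomials of order 1..3 first fail at OP(1,2)=1, OP(2,3)=15 and
# OP(3,4)=58, so the sum of first incorrect terms is 1 + 15 + 58 = 74:
#     solution(lambda n: n**3, 3)  # -> 74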
| 64 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 16_00, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 16_00, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=A__ , )
assert hasattr(self , """env""" )
def __A ( self , A__ ):
# configuration for running training on smdistributed Model Parallel
A__ : List[str] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
A__ : Union[str, Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
A__ : Dict = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
A__ : Tuple = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=A__ , instance_type=self.instance_type , debugger_hook_config=A__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=A__ , py_version="""py36""" , )
def __A ( self , A__ ):
TrainingJobAnalytics(A__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __A ( self , A__ ):
# create estimator
A__ : int = self.create_estimator(A__ )
# run training
estimator.fit()
# result dataframe
A__ : str = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A__ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
A__ : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
A__ : Optional[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A__ )
| 64 |
from functools import lru_cache
@lru_cache
def factorial (num: int ) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
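# --- Added usage note (not part of the original) ---
# lru_cache memoizes every argument seen, so each factorial value is computed
# once per process:
#     factorial(10)           # 11 cache misses (10! down to 0!)
#     factorial(10)           # pure cache hit
#     factorial.cache_info()  # CacheInfo(hits=1, misses=11, maxsize=128, currsize=11)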
| 64 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
A_ : Dict = TypeVar('T')
class SegmentTree(Generic[T] ):
    '''simple docstring'''

    def __init__( self , arr , fnc ):
        any_type: Any | T = None
        self.N = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()

    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def update( self , p , v ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def query( self , l , r ):  # noqa: E741
        l , r = l + self.N , r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2 , (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() -> None:
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            min_range = reduce(min , test_array[i : j + 1] )
            max_range = reduce(max , test_array[i : j + 1] )
            sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
    test_array[index] = value
    min_segment_tree.update(index, value)
    max_segment_tree.update(index, value)
    sum_segment_tree.update(index, value)
test_all_segments()
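# --- Added note (not part of the original snippet) ---
# The tree lives in a flat array st of length 2 * N: leaf i sits at st[N + i],
# the parent of node p at st[p // 2], and the root at st[1]. update() therefore
# climbs p //= 2 to the root, while query() walks l and r inward, folding a
# node into the result with fn whenever it closes the range on its side.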
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def UpperCamelCase () -> Tuple:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class _a (__magic_name__ ):
'''simple docstring'''
@require_beam
def __A ( self ):
A__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : int = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
import apache_beam as beam
A__ : int = beam.io.parquetio.WriteToParquet
A__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
A__ : Optional[Any] = partial(A__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __A ( self ):
A__ : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 64 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A_ : str = logging.get_logger(__name__)
def normalize_box (box , width , height ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
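# --- Added worked example (not part of the original file) ---
# A pixel box [10, 20, 110, 220] on a 1000x2000 page maps onto LayoutLM's
# 0-1000 coordinate grid as [10, 10, 110, 110]:
#     normalize_box([10, 20, 110, 220], 1000, 2000)  # -> [10, 10, 110, 110]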
def apply_tesseract (image: np.ndarray , lang: Optional[str] , tesseract_config: Optional[str] ):
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="""dict""" , config=tesseract_config )
    words , left , top , width , height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = ['''pixel_values''']
def __init__( self , A__ = True , A__ = None , A__ = PILImageResampling.BILINEAR , A__ = True , A__ = 1 / 255 , A__ = True , A__ = None , A__ = None , A__ = True , A__ = None , A__ = "" , **A__ , ):
super().__init__(**A__ )
A__ : Union[str, Any] = size if size is not None else {"""height""": 224, """width""": 224}
A__ : Tuple = get_size_dict(A__ )
A__ : int = do_resize
A__ : Union[str, Any] = size
A__ : List[str] = resample
A__ : List[str] = do_rescale
A__ : Optional[int] = rescale_value
A__ : Union[str, Any] = do_normalize
A__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
A__ : List[str] = apply_ocr
A__ : Optional[int] = ocr_lang
A__ : int = tesseract_config
def __A ( self , A__ , A__ , A__ = PILImageResampling.BILINEAR , A__ = None , **A__ , ):
A__ : Dict = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
A__ : Tuple = (size["""height"""], size["""width"""])
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ , A__ = None , **A__ , ):
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ , A__ , A__ = None , **A__ , ):
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ = None , A__ = None , A__=None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ):
A__ : Tuple = do_resize if do_resize is not None else self.do_resize
A__ : Any = size if size is not None else self.size
A__ : Tuple = get_size_dict(A__ )
A__ : str = resample if resample is not None else self.resample
A__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ : int = do_normalize if do_normalize is not None else self.do_normalize
A__ : str = image_mean if image_mean is not None else self.image_mean
A__ : List[Any] = image_std if image_std is not None else self.image_std
A__ : Tuple = apply_ocr if apply_ocr is not None else self.apply_ocr
A__ : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
A__ : Union[str, Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
A__ : Optional[int] = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
A__ : List[Any] = [to_numpy_array(A__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
A__ : List[Any] = []
A__ : int = []
for image in images:
A__ , A__ : List[Any] = apply_tesseract(A__ , A__ , A__ )
words_batch.append(A__ )
boxes_batch.append(A__ )
if do_resize:
A__ : int = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_rescale:
A__ : Dict = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
A__ : List[Any] = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
A__ : str = [to_channel_dimension_format(A__ , A__ ) for image in images]
A__ : Optional[int] = BatchFeature(data={"""pixel_values""": images} , tensor_type=A__ )
if apply_ocr:
A__ : str = words_batch
A__ : Any = boxes_batch
return data
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , A__ , )
super().__init__(*A__ , **A__ )
| 64 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A_ : List[Any] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = '''albert'''
def __init__( self , A__=3_0000 , A__=128 , A__=4096 , A__=12 , A__=1 , A__=64 , A__=1_6384 , A__=1 , A__="gelu_new" , A__=0 , A__=0 , A__=512 , A__=2 , A__=0.0_2 , A__=1e-12 , A__=0.1 , A__="absolute" , A__=0 , A__=2 , A__=3 , **A__ , ):
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
A__ : Tuple = vocab_size
A__ : str = embedding_size
A__ : List[Any] = hidden_size
A__ : Union[str, Any] = num_hidden_layers
A__ : Dict = num_hidden_groups
A__ : List[str] = num_attention_heads
A__ : List[Any] = inner_group_num
A__ : Optional[int] = hidden_act
A__ : Any = intermediate_size
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : str = max_position_embeddings
A__ : Union[str, Any] = type_vocab_size
A__ : str = initializer_range
A__ : Tuple = layer_norm_eps
A__ : Optional[Any] = classifier_dropout_prob
A__ : Any = position_embedding_type
class _a (__magic_name__ ):
'''simple docstring'''
@property
def __A ( self ):
if self.task == "multiple-choice":
A__ : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A__ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
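# --- Added usage sketch (not part of the original file) ---
# For non-multiple-choice tasks the property above exports dynamic axes
# ("batch", "sequence") for every input, e.g. (class names assumed from transformers):
#     onnx_config = AlbertOnnxConfig(AlbertConfig())
#     list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'token_type_ids']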
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def dummy_dataloaders (a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train (num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
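# --- Added note (not part of the original helpers) ---
# train() records random.random() once per epoch, so two runs agree only if
# the global RNG state matches; the checkpoint tests compare these lists to
# catch RNG save/load regressions. The idea in isolation:
#     import random
#     random.seed(42); a = [random.random() for _ in range(3)]
#     random.seed(42); b = [random.random() for _ in range(3)]
#     assert a == b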
class DummyModel(nn.Module ):
    '''simple docstring'''

    def __init__( self ):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def forward( self , x ):
        return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
# Save 11 states; with total_limit=2 only the last two checkpoints should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 64 | 1 |
def UpperCamelCase (number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
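# Hedged usage sketch (added for illustration, not part of the original file): for a
# positive integer the helper returns the position of the most significant set bit,
# which coincides with Python's built-in ``int.bit_length``.
if __name__ == "__main__":
    assert UpperCamelCase(1 ) == 1
    assert UpperCamelCase(8 ) == (8).bit_length() == 4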
| 64 |
def UpperCamelCase (a: str , b: str ) -> bool:
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
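# Hedged usage sketch (added for illustration): the DP above decides whether string `a`
# can be turned into the uppercase target `b` by capitalizing some of its lowercase
# letters and deleting the remaining lowercase ones (the classic "abbreviation" problem).
if __name__ == "__main__":
    assert UpperCamelCase("daBcd", "ABC") is True   # drop 'd', 'a'->'A', keep 'B', 'c'->'C', drop 'd'
    assert UpperCamelCase("dBcd", "ABC") is False   # no way to produce the leading 'A'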
| 64 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : Tuple = logging.get_logger(__name__)
A_ : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
A_ : int = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = VOCAB_FILES_NAMES
UpperCAmelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: List[Any] = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__: Tuple = GPTaTokenizer
def __init__( self , A__=None , A__=None , A__=None , A__="<|endoftext|>" , A__="<|endoftext|>" , A__="<|endoftext|>" , A__=False , **A__ , ):
super().__init__(
A__ , A__ , tokenizer_file=A__ , unk_token=A__ , bos_token=A__ , eos_token=A__ , add_prefix_space=A__ , **A__ , )
A__ : Union[str, Any] = kwargs.pop("""add_bos_token""" , A__ )
A__ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(A__ , pre_tok_state.pop("""type""" ) )
A__ : Tuple = add_prefix_space
A__ : Dict = pre_tok_class(**A__ )
A__ : Dict = add_prefix_space
def __A ( self , *A__ , **A__ ):
A__ : Any = kwargs.get("""is_split_into_words""" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A__ , **A__ )
def __A ( self , *A__ , **A__ ):
A__ : Optional[Any] = kwargs.get("""is_split_into_words""" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A__ , **A__ )
def __A ( self , A__ , A__ = None ):
A__ : Optional[int] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def __A ( self , A__ ):
A__ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A__ , add_special_tokens=A__ ) + [self.eos_token_id] )
if len(A__ ) > self.model_max_length:
A__ : str = input_ids[-self.model_max_length :]
return input_ids
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple=1.0 , lowercase_: Dict=None , lowercase_: int=None ) -> str:
if rng is None:
A__ : Optional[Any] = global_rng
A__ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = ASTFeatureExtractor
def __A ( self ):
A__ : Optional[Any] = ASTFeatureExtractionTester(self )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[str] = np.asarray(A__ )
A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
@require_torch
def __A ( self ):
import torch
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self , A__ ):
from datasets import load_dataset
A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def __A ( self ):
# fmt: off
A__ : Optional[Any] = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Tuple = ASTFeatureExtractor()
A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
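# Hedged note (added): the (1, 1024, 128) shape asserted above reflects AST's fixed
# input of 1024 spectrogram frames by 128 mel bins; the feature extractor pads or
# truncates the log-mel features of each waveform to that size.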
| 64 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = None
def __A ( self ):
A__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Optional[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , A__ )
def __A ( self ):
A__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : str = os.path.join(A__ , """feat_extract.json""" )
feat_extract_first.to_json_file(A__ )
A__ : Optional[Any] = self.feature_extraction_class.from_json_file(A__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __A ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = feat_extract_first.save_pretrained(A__ )[0]
check_json_file_has_correct_format(A__ )
A__ : Optional[Any] = self.feature_extraction_class.from_pretrained(A__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __A ( self ):
A__ : int = self.feature_extraction_class()
self.assertIsNotNone(A__ )
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
super().__init__()
A__ : Union[str, Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
A__ : str = prefix_inner_dim
A__ : Optional[Any] = prefix_hidden_dim
A__ : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ : int = (
nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A__ : Tuple = GPTaConfig(
vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
A__ : int = GPTaLMHeadModel(A__ )
def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
A__ : List[str] = self.transformer.transformer.wte(A__ )
A__ : int = self.encode_prefix(A__ )
A__ : int = self.decode_prefix(A__ )
A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __A ( self , A__ , A__ ):
return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
def __A ( self , A__ ):
return self.encode_prefix(A__ )
@torch.no_grad()
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
A__ : Optional[int] = []
A__ : str = []
for feature in features:
A__ : Dict = self.decode_prefix(feature.to(A__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Union[str, Any] = self.generate_beam(
input_embeds=A__ , device=A__ , eos_token_id=A__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : int = torch.stack(A__ )
A__ : List[Any] = torch.stack(A__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
A__ : Any = eos_token_id
A__ : Any = None
A__ : Optional[int] = None
A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Dict = input_embeds
else:
A__ : str = self.transformer.transformer.wte(A__ )
for i in range(A__ ):
A__ : Dict = self.transformer(inputs_embeds=A__ )
A__ : str = outputs.logits
A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Any = logits.softmax(-1 ).log()
if scores is None:
A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : Optional[Any] = next_tokens
else:
A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
else:
A__ : Optional[int] = -float(np.inf )
A__ : List[Any] = 0
A__ : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Dict = scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
A__ : Tuple = next_tokens // scores_sum.shape[1]
A__ : Optional[Any] = seq_lengths[next_tokens_source]
A__ : List[str] = next_tokens % scores_sum.shape[1]
A__ : Optional[int] = next_tokens.unsqueeze(1 )
A__ : int = tokens[next_tokens_source]
A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
A__ : str = generated[next_tokens_source]
A__ : Optional[Any] = scores_sum_average * seq_lengths
A__ : Union[str, Any] = is_stopped[next_tokens_source]
A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
if is_stopped.all():
break
A__ : Dict = scores / seq_lengths
A__ : Dict = scores.argsort(descending=A__ )
# tokens tensors are already padded to max_seq_length
A__ : Union[str, Any] = [tokens[i] for i in order]
A__ : Any = torch.stack(A__ , dim=0 )
A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
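# Hedged illustration (added, not part of the pipeline): the beam ranking above scores
# candidates by length-normalized log-probability, score(seq) = sum_t log p_t / len(seq),
# mirroring the `scores_sum / seq_lengths` step. A minimal standalone version:
import torch

def length_normalized_scores(logprob_sums: torch.Tensor, lengths: torch.Tensor) -> torch.Tensor:
    # Average log-probability per generated token; higher (closer to 0) is better.
    return logprob_sums / lengths

if __name__ == "__main__":
    # Two beams: total log-probs -4.0 and -9.0 over 2 and 3 tokens -> tensor([-2., -3.])
    print(length_normalized_scores(torch.tensor([-4.0, -9.0]), torch.tensor([2.0, 3.0])))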
| 64 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
A_ : Union[str, Any] = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: torch.nn.Module , lowercase_: BnbQuantizationConfig , lowercase_: Union[str, os.PathLike] = None , lowercase_: Optional[Dict[str, Union[int, str, torch.device]]] = None , lowercase_: Optional[List[str]] = None , lowercase_: Optional[Dict[Union[int, str], Union[int, str]]] = None , lowercase_: Optional[Union[str, os.PathLike]] = None , lowercase_: bool = False , ) -> int:
A__ : Union[str, Any] = bnb_quantization_config.load_in_abit
A__ : Optional[int] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A__ : Any = []
# custom device map
if isinstance(lowercase_ , lowercase_ ) and len(device_map.keys() ) > 1:
A__ : Dict = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A__ : str = get_keys_to_not_convert(lowercase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowercase_ )
A__ : Any = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A__ : Optional[int] = []
A__ : List[str] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowercase_ )
# compatibility with peft
A__ : Optional[Any] = load_in_abit
A__ : Any = load_in_abit
A__ : Tuple = get_parameter_device(lowercase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A__ : List[str] = replace_with_bnb_layers(lowercase_ , lowercase_ , modules_to_not_convert=lowercase_ )
# convert param to the right dtype
A__ : Dict = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
A__ : Optional[Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
A__ : Union[str, Any] = getattr(lowercase_ , lowercase_ , lowercase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowercase_ ):
param.to(lowercase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
A__ : str = replace_with_bnb_layers(
lowercase_ , lowercase_ , modules_to_not_convert=lowercase_ )
A__ : int = get_quantized_model_device_map(
lowercase_ , lowercase_ , lowercase_ , max_memory=lowercase_ , no_split_module_classes=lowercase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A__ : Tuple = True
A__ : Tuple = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
lowercase_ , lowercase_ , lowercase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowercase_ , offload_state_dict=lowercase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowercase_ , device_map=lowercase_ , offload_dir=lowercase_ )
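# Hedged usage sketch (added). Upstream accelerate exposes this entry point as
# `load_and_quantize_model`; the commented call below assumes that public name, a GPU,
# and an installed `bitsandbytes`, so it is illustrative rather than executed here.
#
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# with init_empty_weights():
#     empty_model = MyModel()  # hypothetical model class
# quantized_model = load_and_quantize_model(
#     empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
# )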
def UpperCamelCase (lowercase_: Optional[int] , lowercase_: Any , lowercase_: List[Any]=None , lowercase_: int=None , lowercase_: List[str]=None ) -> Union[str, Any]:
if device_map is None:
if torch.cuda.is_available():
A__ : Union[str, Any] = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(lowercase_ , lowercase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A__ : List[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A__ : str = {}
A__ : Tuple = special_dtypes
A__ : List[Any] = no_split_module_classes
A__ : List[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A__ : str = get_balanced_memory(
lowercase_ , low_zero=(device_map == """balanced_low_0""") , max_memory=lowercase_ , **lowercase_ , )
A__ : List[Any] = max_memory
A__ : Union[str, Any] = infer_auto_device_map(lowercase_ , **lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
# check if don't have any quantized module on the cpu
A__ : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A__ : Union[str, Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def UpperCamelCase (lowercase_: Tuple , lowercase_: Optional[int] , lowercase_: List[str]=None , lowercase_: Any=None ) -> Optional[int]:
if modules_to_not_convert is None:
A__ : str = []
A__ , A__ : Tuple = _replace_with_bnb_layers(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def UpperCamelCase (lowercase_: Tuple , lowercase_: Optional[Any] , lowercase_: List[Any]=None , lowercase_: Tuple=None , ) -> Dict:
A__ : List[str] = False
for name, module in model.named_children():
if current_key_name is None:
A__ : Optional[int] = []
current_key_name.append(lowercase_ )
if isinstance(lowercase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A__ : List[Any] = """.""".join(lowercase_ )
A__ : Optional[int] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A__ : int = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A__ : List[str] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowercase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
A__ : Optional[Any] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A__ : Dict = module.weight.data
if module.bias is not None:
A__ : Union[str, Any] = module.bias.data
bnb_module.requires_grad_(lowercase_ )
setattr(lowercase_ , lowercase_ , lowercase_ )
A__ : Any = True
if len(list(module.children() ) ) > 0:
A__ , A__ : List[str] = _replace_with_bnb_layers(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
A__ : int = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
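# Minimal self-contained sketch (added) of the recursive replacement pattern used above,
# swapping every nn.Linear for a caller-supplied module instead of a bitsandbytes layer.
import torch.nn as nn

def replace_linears(module: nn.Module, factory) -> nn.Module:
    for name, child in module.named_children():
        if isinstance(child, nn.Linear):
            setattr(module, name, factory(child))  # replace the leaf in-place
        else:
            replace_linears(child, factory)        # recurse into submodules
    return module

if __name__ == "__main__":
    net = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.Linear(4, 2)))
    replace_linears(net, lambda lin: nn.Identity())
    assert not any(isinstance(m, nn.Linear) for m in net.modules())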
def UpperCamelCase (lowercase_: Dict ) -> Tuple:
# Create a copy of the model
with init_empty_weights():
A__ : List[Any] = deepcopy(lowercase_ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
A__ : Any = find_tied_parameters(lowercase_ )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase_ , lowercase_ ):
A__ : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A__ : Optional[Any] = sum(lowercase_ , [] )
A__ : str = len(lowercase_ ) > 0
# Check if it is a base model
A__ : List[str] = False
if hasattr(lowercase_ , """base_model_prefix""" ):
A__ : List[Any] = not hasattr(lowercase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ : Optional[Any] = list(model.named_children() )
A__ : Any = [list_modules[-1][0]]
# add last module together with tied weights
A__ : List[Any] = set(lowercase_ ) - set(lowercase_ )
A__ : Optional[Any] = list(set(lowercase_ ) ) + list(lowercase_ )
# remove ".weight" from the keys
A__ : List[Any] = [""".weight""", """.bias"""]
A__ : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ : List[Any] = name.replace(lowercase_ , """""" )
filtered_module_names.append(lowercase_ )
return filtered_module_names
def UpperCamelCase (lowercase_: str ) -> List[Any]:
for m in model.modules():
if isinstance(lowercase_ , bnb.nn.Linearabit ):
return True
return False
def UpperCamelCase (lowercase_: nn.Module ) -> Optional[int]:
return next(parameter.parameters() ).device
def UpperCamelCase (lowercase_: Dict , lowercase_: str , lowercase_: str , lowercase_: List[Any] , lowercase_: Tuple , lowercase_: Tuple , lowercase_: List[Any] ) -> int:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowercase_ , lowercase_ , 0 , dtype=lowercase_ , value=lowercase_ )
A__ : Optional[Any] = param_name
A__ : Optional[int] = model
if "." in tensor_name:
A__ : Union[str, Any] = tensor_name.split(""".""" )
for split in splits[:-1]:
A__ : List[Any] = getattr(lowercase_ , lowercase_ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
A__ : Any = new_module
A__ : List[Any] = splits[-1]
# offload weights
A__ : List[Any] = False
offload_weight(module._parameters[tensor_name] , lowercase_ , lowercase_ , index=lowercase_ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , lowercase_ , index=lowercase_ , )
else:
offload_weight(lowercase_ , lowercase_ , lowercase_ , index=lowercase_ )
offload_weight(lowercase_ , param_name.replace("""weight""" , """SCB""" ) , lowercase_ , index=lowercase_ )
set_module_tensor_to_device(lowercase_ , lowercase_ , """meta""" , dtype=lowercase_ , value=torch.empty(*param.size() ) )
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__: Optional[datasets.Features] = None
UpperCAmelCase__: str = "utf-8"
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: bool = True # deprecated
UpperCAmelCase__: Optional[int] = None # deprecated
UpperCAmelCase__: int = 10 << 20 # 10 << 20 bytes == 10 MiB
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__: List[str] = JsonConfig
def __A ( self ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
A__ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
A__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
A__ : Optional[Any] = data_files
if isinstance(A__ , A__ ):
A__ : List[str] = [files]
A__ : int = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
A__ : Optional[int] = [files]
A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A__ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , A__ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
# We keep only the field we are interested in
A__ : Optional[int] = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
A__ : Any = dataset
A__ : Any = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , """rb""" ) as f:
A__ : List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
A__ : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
A__ : Dict = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
try:
while True:
try:
A__ : str = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
A__ : str = set().union(*[row.keys() for row in dataset] )
A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
A__ : int = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1
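# Hedged standalone sketch (added): the chunked parsing loop above reduces to reading
# fixed-size byte blocks, completing the trailing line so no record straddles two
# chunks, and handing each block to pyarrow's JSON reader.
import io
import pyarrow.json as paj

def read_jsonl_in_chunks(path: str, chunksize: int = 10 << 20):
    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                break
            batch += f.readline()  # finish the current line before parsing
            yield paj.read_json(io.BytesIO(batch))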
| 64 | 1 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (num: int ) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
    return 1 if num in (0, 1) else num * UpperCamelCase(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
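# Hedged usage sketch (added): `lru_cache` memoizes each recursion level, so repeated
# calls reuse earlier results instead of recomputing the whole chain.
if __name__ == "__main__":
    assert UpperCamelCase(0) == 1
    assert UpperCamelCase(5) == 120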
| 64 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
A_ : Any = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
A_ : List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
A_ : Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 64 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
A_ : Tuple = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , A__ ):
super().__init__()
A__ : Any = nn.ModuleList(A__ )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = False , A__ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(A__ , A__ , self.nets ) ):
A__ , A__ : List[str] = controlnet(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
# merge samples
if i == 0:
A__ , A__ : List[Any] = down_samples, mid_sample
else:
A__ : List[str] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A__ , A__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __A ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , ):
A__ : Optional[int] = 0
A__ : Union[str, Any] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A__ , is_main_process=A__ , save_function=A__ , safe_serialization=A__ , variant=A__ , )
idx += 1
A__ : Union[str, Any] = model_path_to_save + F"""_{idx}"""
@classmethod
def __A ( cls , A__ , **A__ ):
A__ : Union[str, Any] = 0
A__ : Dict = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
A__ : Union[str, Any] = pretrained_model_path
while os.path.isdir(A__ ):
A__ : Any = ControlNetModel.from_pretrained(A__ , **A__ )
controlnets.append(A__ )
idx += 1
A__ : List[str] = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(A__ )} controlnets loaded from {pretrained_model_path}.""" )
if len(A__ ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(A__ )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(A__ )
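# Hedged usage sketch (added; directory names are hypothetical). `save_pretrained`
# above writes one sub-directory per net (`.../controlnet`, `.../controlnet_1`, ...)
# and `from_pretrained` walks the same suffix scheme back:
#
# multi_controlnet.save_pretrained("./mydirectory/controlnet")
# restored = type(multi_controlnet).from_pretrained("./mydirectory/controlnet")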
| 64 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*lowercase_: Optional[int] , lowercase_: Optional[Union[Dict, Any]] = None , lowercase_: Dict=True , lowercase_: Tuple=2 ) -> Dict:
from .. import __version__
A__ : Dict = take_from
A__ : str = ()
if not isinstance(args[0] , lowercase_ ):
A__ : int = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase_ ).base_version ) >= version.parse(lowercase_ ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
A__ : Any = None
if isinstance(lowercase_ , lowercase_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase_ ),)
A__ : List[str] = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(lowercase_ , lowercase_ ):
values += (getattr(lowercase_ , lowercase_ ),)
A__ : Optional[Any] = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
A__ : int = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
A__ : int = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , lowercase_ , stacklevel=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) > 0:
A__ : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
A__ : Optional[Any] = call_frame.filename
A__ : Optional[int] = call_frame.lineno
A__ : Any = call_frame.function
A__ , A__ : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(lowercase_ ) == 0:
return
elif len(lowercase_ ) == 1:
return values[0]
return values
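# Self-contained sketch (added) of the deprecation pattern implemented above, reduced
# to its core: pop a deprecated kwarg, emit a warning, and hand back its value.
import warnings

def pop_deprecated(kwargs: dict, name: str, removed_in: str):
    if name in kwargs:
        warnings.warn(
            f"`{name}` is deprecated and will be removed in version {removed_in}.",
            FutureWarning,
            stacklevel=2,
        )
        return kwargs.pop(name)
    return None

if __name__ == "__main__":
    opts = {"old_flag": 3}
    assert pop_deprecated(opts, "old_flag", "1.0.0") == 3 and "old_flag" not in opts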
| 64 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _a :
'''simple docstring'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=50 , A__=0.0_2 , A__=True , A__=None , ):
A__ : Optional[Any] = parent
A__ : Optional[Any] = batch_size
A__ : Any = seq_length
A__ : Any = is_training
A__ : List[Any] = use_input_mask
A__ : int = vocab_size
A__ : int = hidden_size
A__ : Union[str, Any] = num_hidden_layers
A__ : Dict = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Any = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : Optional[int] = initializer_range
A__ : Optional[int] = use_labels
A__ : List[Any] = scope
def __A ( self ):
A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Union[str, Any] = None
if self.use_input_mask:
A__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def __A ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A__ , initializer_range=self.initializer_range , )
def __A ( self ):
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : List[str] = self.prepare_config_and_inputs()
A__ : List[str] = True
A__ : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self , A__ , A__ , A__ , A__ , **A__ , ):
A__ : List[str] = BertGenerationEncoder(config=A__ )
model.to(A__ )
model.eval()
A__ : Optional[int] = model(A__ , attention_mask=A__ )
A__ : List[str] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , **A__ , ):
A__ : List[str] = True
A__ : List[Any] = BertGenerationEncoder(config=A__ )
model.to(A__ )
model.eval()
A__ : List[str] = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
A__ : Any = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , **A__ , ):
A__ : str = True
A__ : Dict = True
A__ : str = BertGenerationDecoder(config=A__ ).to(A__ ).eval()
# first forward pass
A__ : Any = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , use_cache=A__ , )
A__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
A__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
A__ : Optional[int] = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
A__ : Dict = model(
A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )["""hidden_states"""][0]
# select random slice
A__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-3 ) )
def __A ( self , A__ , A__ , A__ , A__ , *A__ , ):
A__ : Dict = BertGenerationDecoder(A__ )
model.to(A__ )
model.eval()
A__ : Tuple = model(A__ , attention_mask=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self ):
A__ , A__ , A__ , A__ : str = self.prepare_config_and_inputs()
A__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
UpperCAmelCase__: Union[str, Any] = (BertGenerationDecoder,) if is_torch_available() else ()
UpperCAmelCase__: Optional[Any] = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __A ( self ):
A__ : Union[str, Any] = BertGenerationEncoderTester(self )
A__ : str = ConfigTester(self , config_class=A__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ , A__ , A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs()
A__ : int = """bert"""
self.model_tester.create_and_check_model(A__ , A__ , A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A__ )
def __A ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*A__ )
def __A ( self ):
# This regression test was failing with PyTorch < 1.3
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ : List[str] = None
self.model_tester.create_and_check_model_as_decoder(
A__ , A__ , A__ , A__ , A__ , A__ , )
def __A ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*A__ )
@slow
def __A ( self ):
A__ : int = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(A__ )
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ):
A__ : Tuple = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A__ : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A__ : Union[str, Any] = model(A__ )[0]
A__ : str = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , A__ )
A__ : Tuple = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) )
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ):
A__ : Optional[int] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A__ : Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A__ : str = model(A__ )[0]
A__ : str = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape , A__ )
A__ : Optional[Any] = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) )
| 64 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (lowercase_: List[str] , lowercase_: str ) -> Optional[Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : Optional[int] = torch.permute(lowercase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase_ ):
# linear layer
A__ : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A__ : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
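# Minimal illustration (added) of the kernel-transpose convention above: Flax stores
# dense kernels as (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features), so converting a linear layer transposes the tensor.
import numpy as np
import torch

flax_kernel = np.ones((3, 5), dtype=np.float32)   # (in_features, out_features)
torch_weight = torch.from_numpy(flax_kernel).T    # -> (out_features, in_features)
assert torch_weight.shape == (5, 3)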
def UpperCamelCase (lowercase_: Tuple , lowercase_: Optional[int] , lowercase_: str ) -> Union[str, Any]:
if "metadata" in layer:
A__ : Tuple = layer.split("""metadata""" )
A__ : Optional[Any] = """""".join(split_layer[0] )[:-1]
A__ : Optional[Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
A__ : str = layer.split("""kvstore""" )
A__ : int = """""".join(split_layer[0] )[:-1]
A__ : Optional[int] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
A__ : Any = layer.split("""/""" )
A__ : int = """/""".join(split_layer[:-1] )
A__ : str = (split_layer[-1],)
if "kvstore/path" in layer:
A__ : Dict = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
A__ : Optional[int] = """file"""
else:
A__ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCamelCase (lowercase_: str , lowercase_: List[Any] ) -> int:
A__ : int = rename_keys(lowercase_ )
A__ : Any = {}
for k, v in current_block.items():
A__ : Dict = v
A__ : str = new_current_block
torch.save(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
A__ : List[Any] = []
A__ : int = {}
A__ : List[str] = 0
A__ : Any = 0
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
A__ : Any = {}
for layer in checkpoint_info.keys():
A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
lowercase_ , lowercase_ , lowercase_ )
if curr_real_layer_name in all_layers:
A__ : Optional[int] = content
else:
A__ : List[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A__ : List[Any] = torch.tensor(lowercase_ )
A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
A__ : Any = """/""".join(lowercase_ )
# If this weight would tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A__ : List[Any] = os.path.join(
lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A__ : Any = {}
A__ : str = 0
A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A__ : str = {}
A__ : Any = {}
for idx, shard in enumerate(lowercase_ ):
A__ : Any = weights_name.replace(
""".bin""" , f"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" )
A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
A__ : str = shard
for key in shard:
A__ : Any = shard_file
# Add the metadata
A__ : Tuple = {"""total_size""": total_size}
A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
f.write(lowercase_ )
return metadata, index
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCamelCase () -> int:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A__ : str = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
A__ : str = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
A__ : Tuple = TaTokenizer.from_pretrained("""t5-small""" )
A__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
A__ : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
A__ : Tuple = model.generate(lowercase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 64 | 1 |
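The sharding function above (invoked as shard_on_the_fly at the bottom of the script; the definitions themselves were renamed by the dataset's obfuscation) packs tensors greedily: weights accumulate in the current shard until the next one would exceed max_shard_size, at which point a new file is started. A minimal standalone sketch of that packing idea, with illustrative names rather than the script's real API:

import torch

def shard_by_size(state_dict, max_bytes):
    # Greedy packing: start a new shard once the next tensor would overflow the budget.
    shards, current, current_size = [], {}, 0
    for name, tensor in state_dict.items():
        size = tensor.numel() * tensor.element_size()
        if current and current_size + size > max_bytes:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = tensor
        current_size += size
    if current:
        shards.append(current)
    return shards

parts = shard_by_size({"a": torch.zeros(1000), "b": torch.zeros(1000)}, max_bytes=4096)
print(len(parts))  # 2: each float32 tensor is 4000 bytes, so two cannot share a 4096-byte shard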
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
'''simple docstring'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=False , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=512 , A__=16 , A__=2 , A__=0.0_2 , A__=3 , A__=4 , A__=None , ):
A__ : List[Any] = parent
A__ : str = batch_size
A__ : List[Any] = seq_length
A__ : str = is_training
A__ : Tuple = use_input_mask
A__ : List[Any] = use_token_type_ids
A__ : Tuple = use_labels
A__ : Dict = vocab_size
A__ : List[str] = hidden_size
A__ : Union[str, Any] = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : Any = intermediate_size
A__ : Optional[int] = hidden_act
A__ : Dict = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[Any] = max_position_embeddings
A__ : List[str] = type_vocab_size
A__ : Optional[int] = type_sequence_label_size
A__ : Dict = initializer_range
A__ : Union[str, Any] = num_labels
A__ : List[str] = num_choices
A__ : Union[str, Any] = scope
def __A ( self ):
A__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[Any] = None
if self.use_input_mask:
A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[Any] = None
if self.use_token_type_ids:
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Tuple = None
A__ : Optional[Any] = None
A__ : str = None
if self.use_labels:
A__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : str = ids_tensor([self.batch_size] , self.num_choices )
A__ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
A__ : Optional[Any] = BioGptModel(config=A__ )
model.to(A__ )
model.eval()
A__ : Optional[Any] = model(A__ , attention_mask=A__ )
A__ : Optional[Any] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
A__ : List[str] = BioGptForCausalLM(config=A__ )
model.to(A__ )
model.eval()
A__ : Optional[Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , *A__ ):
A__ : str = BioGptModel(config=A__ )
model.to(A__ )
model.eval()
# create attention mask
A__ : str = torch.ones(input_ids.shape , dtype=torch.long , device=A__ )
A__ : str = self.seq_length // 2
A__ : Any = 0
# first forward pass
A__ , A__ : List[str] = model(A__ , attention_mask=A__ ).to_tuple()
# create hypothetical next token and extend to next_input_ids
A__ : str = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : str = ids_tensor((1,) , A__ ).item() + 1
A__ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : Dict = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=A__ )] , dim=1 , )
# get two different outputs
A__ : Any = model(A__ , attention_mask=A__ )["""last_hidden_state"""]
A__ : Optional[int] = model(A__ , past_key_values=A__ , attention_mask=A__ )["""last_hidden_state"""]
# select random slice
A__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-3 ) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , *A__ ):
A__ : Optional[Any] = BioGptModel(config=A__ ).to(A__ ).eval()
A__ : int = torch.ones(input_ids.shape , dtype=torch.long , device=A__ )
# first forward pass
A__ : int = model(A__ , attention_mask=A__ , use_cache=A__ )
A__ , A__ : Union[str, Any] = outputs.to_tuple()
# create hypothetical multiple next tokens and extend next_input_ids
A__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention_mask
A__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : str = model(A__ , attention_mask=A__ )["""last_hidden_state"""]
A__ : Optional[int] = model(A__ , attention_mask=A__ , past_key_values=A__ )[
"""last_hidden_state"""
]
# select random slice
A__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-3 ) )
def __A ( self , A__ , A__ , A__ , A__ , A__ , *A__ , A__=False ):
A__ : Optional[int] = BioGptForCausalLM(A__ )
model.to(A__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : str = model(A__ , labels=A__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __A ( self , A__ , *A__ ):
A__ : Optional[Any] = BioGptModel(A__ )
A__ : str = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def __A ( self , A__ , A__ , A__ , A__ , A__ , *A__ ):
A__ : List[str] = self.num_labels
A__ : str = BioGptForTokenClassification(A__ )
model.to(A__ )
model.eval()
A__ : Any = model(A__ , attention_mask=A__ , token_type_ids=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self ):
A__ : Dict = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ , A__ , A__ , A__ : List[str] = config_and_inputs
A__ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCAmelCase__: Dict = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__: List[Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__: int = False
def __A ( self ):
A__ : Optional[int] = BioGptModelTester(self )
A__ : Optional[int] = ConfigTester(self , config_class=A__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : Optional[Any] = type
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A__ )
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A__ , gradient_checkpointing=A__ )
def __A ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A__ )
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A__ )
def __A ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A__ )
@slow
def __A ( self ):
A__ : Union[str, Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(A__ )
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Optional[Any] = """left"""
# Define PAD Token = EOS Token (BioGPT ships without a pad token)
A__ : Dict = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : int = tokenizer(A__ , return_tensors="""pt""" , padding=A__ )
A__ : Union[str, Any] = inputs["""input_ids"""].to(A__ )
A__ : Optional[Any] = model.generate(
input_ids=A__ , attention_mask=inputs["""attention_mask"""].to(A__ ) , )
A__ : str = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(A__ )
A__ : Dict = model.generate(input_ids=A__ )
A__ : int = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : int = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(A__ )
A__ : List[Any] = model.generate(input_ids=A__ , max_length=model.config.max_length - num_paddings )
A__ : Union[str, Any] = tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
A__ : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A__ )
A__ : int = tokenizer.decode(output_padded[0] , skip_special_tokens=A__ )
A__ : Optional[Any] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , [non_padded_sentence, padded_sentence] )
@slow
def __A ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : int = BioGptModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def __A ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
A__ : str = 3
A__ : Union[str, Any] = input_dict["""input_ids"""]
A__ : List[str] = input_ids.ne(1 ).to(A__ )
A__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : str = BioGptForSequenceClassification(A__ )
model.to(A__ )
model.eval()
A__ : List[Any] = model(A__ , attention_mask=A__ , labels=A__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : Tuple = """multi_label_classification"""
A__ : Tuple = input_dict["""input_ids"""]
A__ : Optional[int] = input_ids.ne(1 ).to(A__ )
A__ : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : List[str] = BioGptForSequenceClassification(A__ )
model.to(A__ )
model.eval()
A__ : Any = model(A__ , attention_mask=A__ , labels=A__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ):
A__ : int = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Union[str, Any] = model(A__ )[0]
A__ : List[str] = 4_2384
A__ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , A__ )
A__ : List[str] = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) )
@slow
def __A ( self ):
A__ : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : int = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(A__ )
torch.manual_seed(0 )
A__ : Dict = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(A__ )
A__ : Any = model.generate(
**A__ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=A__ , )
A__ : Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=A__ )
A__ : Union[str, Any] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(A__ , A__ )
| 64 |
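The batched-generation test above depends on left padding: a decoder-only model continues generating from the final input position, so padding must sit before the prompt, and the pad token is aliased to EOS because the model has none of its own. A minimal sketch of the same setup, using "gpt2" purely as a small stand-in checkpoint:

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.padding_side = "left"            # pads go before the prompt, not after it
tokenizer.pad_token = tokenizer.eos_token  # alias EOS as the pad token
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id

batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
out = model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
print(tokenizer.batch_decode(out, skip_special_tokens=True))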
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import numpy as np
def UpperCamelCase (lowercase_: Tuple , lowercase_: int , lowercase_: str , lowercase_: Any , lowercase_: Any ) -> List[Any]:
A__ : int = int(np.ceil((x_end - xa) / h ) )
A__ : str = np.zeros((n + 1,) )
A__ : int = ya
A__ : List[Any] = xa
for k in range(lowercase_ ):
A__ : int = f(lowercase_ , y[k] )
A__ : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
A__ : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
A__ : int = f(x + h , y[k] + h * ka )
A__ : Optional[Any] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
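The function above is the classic fourth-order Runge-Kutta integrator; its identifiers were renamed by the obfuscation, and the positional order implied by the body is (f, y0, x0, x_end, h). A sanity-check sketch with readable names, solving dy/dx = y whose exact solution is exp(x):

import numpy as np

def runge_kutta(f, y0, x0, x_end, h):
    # Same scheme as above, rewritten with readable names.
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y

y = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
print(y[-1], np.exp(1))  # the two values agree to roughly 1e-9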
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A_ : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCamelCase (lowercase_: Optional[Any] ) -> Optional[int]:
A__ : List[Any] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
A_ : Any = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def UpperCamelCase (lowercase_: str ) -> Any:
A__ : Dict = list(s_dict.keys() )
for key in keys:
A__ : List[str] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
A__ : List[Any] = new_key.replace(lowercase_ , lowercase_ )
print(f"""{key} -> {new_key}""" )
A__ : Tuple = s_dict.pop(lowercase_ )
return s_dict
def UpperCamelCase (lowercase_: Tuple ) -> Optional[int]:
A__ , A__ : Any = emb.weight.shape
A__ : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
A__ : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bytes:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
A__ : Tuple = os.path.basename(lowercase_ )
A__ : int = url.split("""/""" )[-2]
A__ : Dict = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ) and not os.path.isfile(lowercase_ ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowercase_ ):
A__ : Optional[Any] = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowercase_ ) as source, open(lowercase_ , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowercase_ , unit_divisor=1024 ) as loop:
while True:
A__ : Any = source.read(8192 )
if not buffer:
break
output.write(lowercase_ )
loop.update(len(lowercase_ ) )
A__ : Dict = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Tuple ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
A__ : Tuple = _download(_MODELS[checkpoint_path] )
else:
A__ : Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
A__ : str = original_checkpoint["""dims"""]
A__ : List[Any] = original_checkpoint["""model_state_dict"""]
A__ : Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(lowercase_ )
rename_keys(lowercase_ )
A__ : List[str] = True
A__ : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
A__ : List[Any] = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
A__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
A__ , A__ : List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
A__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A__ : str = proj_out_weights
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A_ : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 1 |
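The download helper above trusts a file on disk only if its SHA-256 digest matches the one embedded in the URL: in the OpenAI Whisper checkpoint URLs listed at the top, the expected digest is the second-to-last path segment. A condensed sketch of that verification step (function name is illustrative):

import hashlib

def matches_checksum(url: str, data: bytes) -> bool:
    # The Whisper URLs carry the expected SHA-256 digest as a path component.
    expected = url.split("/")[-2]
    return hashlib.sha256(data).hexdigest() == expected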
import inspect
import unittest
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __A ( self ):
import diffusers
from diffusers.dependency_versions_table import deps
A__ : Tuple = inspect.getmembers(A__ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
A__ : Dict = """k-diffusion"""
elif backend == "invisible_watermark":
A__ : List[Any] = """invisible-watermark"""
assert backend in deps, F"""{backend} is not in the deps table!"""
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Tuple = logging.get_logger(__name__)
def UpperCamelCase (lowercase_: Dict , lowercase_: Tuple=False ) -> str:
A__ : Optional[int] = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase (lowercase_: Dict , lowercase_: Tuple , lowercase_: Optional[int]=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Optional[int] = """"""
else:
A__ : List[str] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
A__ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A__ : str = in_proj_weight[
: config.hidden_size, :
]
A__ : Any = in_proj_bias[: config.hidden_size]
A__ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : str = in_proj_weight[
-config.hidden_size :, :
]
A__ : Tuple = in_proj_bias[-config.hidden_size :]
def UpperCamelCase (lowercase_: List[Any] ) -> Optional[Any]:
A__ : int = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: int , lowercase_: Any , lowercase_: str ) -> Dict:
A__ : Optional[Any] = dct.pop(lowercase_ )
A__ : Tuple = val
def UpperCamelCase () -> Tuple:
A__ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : str = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: str=False ) -> str:
A__ : Dict = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=lowercase_ , )
A__ : List[Any] = ViTHybridConfig(backbone_config=lowercase_ , image_size=384 , num_labels=1000 )
A__ : Any = False
# load original model from timm
A__ : List[Any] = timm.create_model(lowercase_ , pretrained=lowercase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase_ )
A__ : Any = create_rename_keys(lowercase_ , lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
read_in_q_k_v(lowercase_ , lowercase_ , lowercase_ )
A__ : Any = """huggingface/label-files"""
A__ : Optional[int] = """imagenet-1k-id2label.json"""
A__ : List[str] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
A__ : Union[str, Any] = {int(lowercase_ ): v for k, v in idalabel.items()}
A__ : Dict = idalabel
A__ : Tuple = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A__ : Union[str, Any] = ViTHybridModel(lowercase_ ).eval()
else:
A__ : Dict = ViTHybridForImageClassification(lowercase_ ).eval()
model.load_state_dict(lowercase_ )
# create image processor
A__ : List[Any] = create_transform(**resolve_data_config({} , model=lowercase_ ) )
A__ : int = transform.transforms
A__ : str = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
A__ : Tuple = ViTHybridImageProcessor(
do_resize=lowercase_ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase_ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowercase_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A__ : Any = prepare_img()
A__ : List[str] = transform(lowercase_ ).unsqueeze(0 )
A__ : List[Any] = processor(lowercase_ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase_ , lowercase_ )
# verify logits
with torch.no_grad():
A__ : Optional[int] = model(lowercase_ )
A__ : str = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
A__ : Any = timm_model.forward_features(lowercase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase_ , outputs.pooler_output , atol=1E-3 )
else:
A__ : Optional[int] = timm_model(lowercase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase_ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase_ )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
A_ : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 64 |
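The read_in_q_k_v step above undoes timm's fused attention projection: timm stores a single (3*hidden, hidden) matrix plus one bias vector, while the HF checkpoint wants separate query, key and value weights. A minimal sketch of the split, with an illustrative hidden size:

import torch

hidden = 768  # illustrative; the real value comes from the model config
qkv_weight = torch.randn(3 * hidden, hidden)
q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[-hidden:]
qkv_bias = torch.randn(3 * hidden)
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[-hidden:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)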
def UpperCamelCase (lowercase_: int ) -> int:
if not isinstance(lowercase_ , int ):
raise TypeError("""Input value must be an 'int' type""" )
A__ : int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
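The helper above shifts the input right until it reaches zero, so it returns the 1-based position of the most significant set bit, which is exactly Python's built-in int.bit_length(). A quick check with readable names:

def msb_position(number: int) -> int:
    # Same loop as above, rewritten with readable names.
    position = 0
    while number:
        position += 1
        number >>= 1
    return position

for n in (1, 17, 32):
    assert msb_position(n) == n.bit_length()
print(msb_position(32))  # 6, since 32 = 0b100000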
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCamelCase (lowercase_: int = 3 ) -> qiskit.result.counts.Counts:
if not isinstance(lowercase_ , int ):
raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(lowercase_ ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""" )
A__ : Any = QuantumRegister(lowercase_ , """qr""" )
A__ : Any = ClassicalRegister(lowercase_ , """cr""" )
A__ : Optional[Any] = QuantumCircuit(lowercase_ , lowercase_ )
A__ : Optional[Any] = number_of_qubits
for i in range(lowercase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowercase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase_ , lowercase_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowercase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowercase_ , lowercase_ )
# simulate with 10000 shots
A__ : int = Aer.get_backend("""qasm_simulator""" )
A__ : List[Any] = execute(lowercase_ , lowercase_ , shots=10000 )
return job.result().get_counts(lowercase_ )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 64 |
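A small numpy check of what the circuit above computes: the QFT is the unitary discrete Fourier transform, so applied to the all-zero input state it produces a uniform superposition, and the 10000 shots should split roughly evenly across all 2**n measurement outcomes.

import numpy as np

n = 3
N = 2 ** n
omega = np.exp(2j * np.pi / N)
qft = np.array([[omega ** (j * k) for k in range(N)] for j in range(N)]) / np.sqrt(N)
state = qft @ np.eye(N)[0]   # QFT applied to the basis state |000>
print(np.abs(state) ** 2)    # every probability is 1/8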
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: int , lowercase_: int ) -> np.ndarray:
A__ : Any = cva.getAffineTransform(lowercase_ , lowercase_ )
return cva.warpAffine(lowercase_ , lowercase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
A_ : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# convert image to grayscale
A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A_ , A_ : Optional[Any] = gray_img.shape
# set different points to rotate image
A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
A_ : Dict = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A_ : Union[str, Any] = plt.figure(1)
A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 64 | 1 |
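The warp above (the module imported as cva is OpenCV's cv2, renamed by the obfuscation) is fully determined by three point correspondences: cv2.getAffineTransform solves for the 2x3 matrix mapping the source triangle onto the destination one. A standalone sketch:

import numpy as np
import cv2

src = np.float32([[50, 50], [200, 50], [50, 200]])
dst = np.float32([[10, 100], [200, 50], [100, 250]])
matrix = cv2.getAffineTransform(src, dst)         # shape (2, 3)
img = np.zeros((300, 300), dtype=np.uint8)
warped = cv2.warpAffine(img, matrix, (300, 300))  # output size is (width, height)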
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (lowercase_: Matrix , lowercase_: Matrix ) -> Matrix:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase_ )]
A__ : int
A__ : int
A__ : int
A__ : int
A__ : int
A__ : float
for row in range(lowercase_ ):
for col in range(lowercase_ ):
A__ : List[str] = matrix[row][col]
A__ : int = vector[row][0]
A__ : Optional[int] = 0
A__ : str = 0
while row < size and col < size:
# pivoting
A__ : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase_ , lowercase_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A__ , A__ : Union[str, Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase_ ):
A__ : List[Any] = augmented[rowa][col] / augmented[row][col]
A__ : Dict = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase_ ):
for row in range(lowercase_ ):
A__ : List[str] = augmented[row][col] / augmented[col][col]
for cola in range(lowercase_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase_ )
]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(lowercase_ )] for _ in range(lowercase_ )]
A__ : Matrix = [[0] for _ in range(lowercase_ )]
A__ : Matrix
A__ : int
A__ : int
A__ : int
for x_val, y_val in enumerate(lowercase_ ):
for col in range(lowercase_ ):
A__ : Dict = (x_val + 1) ** (size - col - 1)
A__ : Any = y_val
A__ : Union[str, Any] = solve(lowercase_ , lowercase_ )
def interpolated_func(lowercase_: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase_ ) )
return interpolated_func
def UpperCamelCase (lowercase_: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCamelCase (lowercase_: Callable[[int], int] = question_function , lowercase_: int = 10 ) -> int:
A__ : list[int] = [func(lowercase_ ) for x_val in range(1 , order + 1 )]
A__ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
A__ : int = 0
A__ : Callable[[int], int]
A__ : int
for poly in polynomials:
A__ : List[str] = 1
while func(lowercase_ ) == poly(lowercase_ ):
x_val += 1
ret += poly(lowercase_ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 64 |
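A worked example of the first-incorrect-term (FIT) logic above, using the cubic u(n) = n**3 from Project Euler 101: the degree-(k-1) fit through the first k terms reproduces the sequence up to n = k and first diverges at n = k + 1. This sketch assumes the interpolation helper's original name was interpolate, as the call sites above still use, even though the definitions were renamed to UpperCamelCase by the obfuscation.

u = lambda n: n ** 3
for k in (1, 2, 3):
    poly = interpolate([u(i) for i in range(1, k + 1)])
    n = 1
    while u(n) == poly(n):
        n += 1
    print(poly(n))  # prints 1, 15, 58 -- the FITs quoted in the problem; their sum is 74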
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
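# Usage sketch for the benchmark API exercised above (model id and sizes are
# illustrative; the transformers benchmark utilities are deprecated in recent
# releases but keep this interface):
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)    # nested per-model timing dict
print(results.memory_inference_result)  # nested per-model memory dict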
| 64 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Optional[int] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = '''vit_msn'''
def __init__( self , A__=768 , A__=12 , A__=12 , A__=3072 , A__="gelu" , A__=0.0 , A__=0.0 , A__=0.0_2 , A__=1e-06 , A__=224 , A__=16 , A__=3 , A__=True , **A__ , ):
super().__init__(**A__ )
A__ : Optional[int] = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Tuple = initializer_range
A__ : Optional[Any] = layer_norm_eps
A__ : Optional[Any] = image_size
A__ : List[str] = patch_size
A__ : Tuple = num_channels
A__ : str = qkv_bias
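# Usage sketch for the config above; ViTMSNConfig/ViTMSNModel are the public
# transformers names this class corresponds to, and the arguments shown are
# the defaults:
from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig(hidden_size=768, num_hidden_layers=12, image_size=224)
model = ViTMSNModel(config)  # randomly initialized, architecture from config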
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
A__ : List[Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int ) -> List[str]:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
A__ : Tuple = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self , A__ , A__ , A__ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , A__ , A__ , A__ )
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
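# Standalone sketch of the same custom-flag pattern with plain stdlib doctest
# and descriptive names (no transformers dependency):
import doctest

IGNORE_RESULT_FLAG = doctest.register_optionflag("IGNORE_RESULT")

class IgnoreResultChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        # An example marked `# doctest: +IGNORE_RESULT` passes no matter
        # what it prints.
        if IGNORE_RESULT_FLAG & optionflags:
            return True
        return super().check_output(want, got, optionflags)

doctest.OutputChecker = IgnoreResultChecker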
| 64 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = CTRLTokenizer
UpperCAmelCase__: int = False
UpperCAmelCase__: Optional[Any] = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ : List[Any] = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
A__ : Union[str, Any] = dict(zip(A__ , range(len(A__ ) ) ) )
A__ : Optional[int] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
A__ : str = {"""unk_token""": """<unk>"""}
A__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A__ ) )
def __A ( self , **A__ ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A__ )
def __A ( self , A__ ):
A__ : Optional[int] = """adapt react readapt apt"""
A__ : Tuple = """adapt react readapt apt"""
return input_text, output_text
def __A ( self ):
A__ : Any = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ : str = """adapt react readapt apt"""
A__ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
A__ : List[str] = tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
A__ : List[str] = tokens + [tokenizer.unk_token]
A__ : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
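# Standalone sketch of the tiny-BPE setup built in setUp above, using the
# same toy vocab/merges:
import json
import os
import tempfile

from transformers import CTRLTokenizer

toy_vocab = {"adapt": 0, "re@@": 1, "a@@": 2, "apt": 3, "c@@": 4, "t": 5, "<unk>": 6}
toy_merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.json")
    merges_file = os.path.join(tmp, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump(toy_vocab, fp)
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(toy_merges))
    tokenizer = CTRLTokenizer(vocab_file, merges_file, unk_token="<unk>")
    print(tokenizer.tokenize("adapt react readapt apt"))
    # ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']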
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
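# Inference sketch matching the integration test above (the checkpoint id
# comes from the test; requires TensorFlow and network access):
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")

batch = tokenizer(["PG&E scheduled the blackouts ..."], padding=True, return_tensors="tf")
summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))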
| 64 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = (DPMSolverSinglestepScheduler,)
UpperCAmelCase__: Optional[int] = (('''num_inference_steps''', 25),)
def __A ( self , **A__ ):
A__ : Optional[Any] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**A__ )
return config
def __A ( self , A__=0 , **A__ ):
A__ : Tuple = dict(self.forward_default_kwargs )
A__ : str = kwargs.pop("""num_inference_steps""" , A__ )
A__ : Optional[int] = self.dummy_sample
A__ : Any = 0.1 * sample
A__ : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A__ : List[str] = self.get_scheduler_config(**A__ )
A__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# copy over dummy past residuals
A__ : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__ )
A__ : Optional[int] = scheduler_class.from_pretrained(A__ )
new_scheduler.set_timesteps(A__ )
# copy over dummy past residuals
A__ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ , A__ : Optional[int] = sample, sample
for t in range(A__ , time_step + scheduler.config.solver_order + 1 ):
A__ : int = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
A__ : List[str] = new_scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __A ( self ):
pass
def __A ( self , A__=0 , **A__ ):
A__ : Any = dict(self.forward_default_kwargs )
A__ : List[str] = kwargs.pop("""num_inference_steps""" , A__ )
A__ : Union[str, Any] = self.dummy_sample
A__ : int = 0.1 * sample
A__ : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
A__ : Optional[int] = self.get_scheduler_config()
A__ : Tuple = scheduler_class(**A__ )
scheduler.set_timesteps(A__ )
# copy over dummy past residuals (must be after setting timesteps)
A__ : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A__ )
A__ : Any = scheduler_class.from_pretrained(A__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A__ )
# copy over dummy past residual (must be after setting timesteps)
A__ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ : List[Any] = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
A__ : List[Any] = new_scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __A ( self , A__=None , **A__ ):
if scheduler is None:
A__ : int = self.scheduler_classes[0]
A__ : List[Any] = self.get_scheduler_config(**A__ )
A__ : Optional[Any] = scheduler_class(**A__ )
A__ : Union[str, Any] = self.scheduler_classes[0]
A__ : Optional[Any] = self.get_scheduler_config(**A__ )
A__ : List[str] = scheduler_class(**A__ )
A__ : Union[str, Any] = 10
A__ : Tuple = self.dummy_model()
A__ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(A__ )
for i, t in enumerate(scheduler.timesteps ):
A__ : Union[str, Any] = model(A__ , A__ )
A__ : Dict = scheduler.step(A__ , A__ , A__ ).prev_sample
return sample
def __A ( self ):
A__ : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ : int = 50
A__ : Dict = self.dummy_model()
A__ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(A__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
A__ : int = model(A__ , A__ )
A__ : List[Any] = scheduler.step(A__ , A__ , A__ ).prev_sample
A__ : str = torch.mean(torch.abs(A__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3
def __A ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=A__ )
def __A ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A__ : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ : Tuple = self.full_loop(scheduler=A__ )
A__ : Optional[int] = torch.mean(torch.abs(A__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
A__ : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
A__ : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A__ : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A__ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A__ : Any = self.full_loop(scheduler=A__ )
A__ : str = torch.mean(torch.abs(A__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def __A ( self ):
self.check_over_configs(thresholding=A__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A__ , prediction_type=A__ , sample_max_value=A__ , algorithm_type="""dpmsolver++""" , solver_order=A__ , solver_type=A__ , )
def __A ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def __A ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A__ , solver_type=A__ , prediction_type=A__ , algorithm_type=A__ , )
A__ : Optional[Any] = self.full_loop(
solver_order=A__ , solver_type=A__ , prediction_type=A__ , algorithm_type=A__ , )
assert not torch.isnan(A__ ).any(), "Samples have nan numbers"
def __A ( self ):
self.check_over_configs(lower_order_final=A__ )
self.check_over_configs(lower_order_final=A__ )
def __A ( self ):
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __A ( self ):
self.check_over_configs(variance_type=A__ )
self.check_over_configs(variance_type="""learned_range""" )
def __A ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=A__ , time_step=0 )
def __A ( self ):
A__ : str = self.full_loop()
A__ : Dict = torch.mean(torch.abs(A__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3
def __A ( self ):
A__ : List[str] = self.full_loop(use_karras_sigmas=A__ )
A__ : List[Any] = torch.mean(torch.abs(A__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3
def __A ( self ):
A__ : Tuple = self.full_loop(prediction_type="""v_prediction""" )
A__ : Any = torch.mean(torch.abs(A__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3
def __A ( self ):
A__ : List[str] = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=A__ )
A__ : str = torch.mean(torch.abs(A__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3
def __A ( self ):
A__ : Any = self.scheduler_classes[0]
A__ : Union[str, Any] = self.get_scheduler_config(thresholding=A__ , dynamic_thresholding_ratio=0 )
A__ : str = scheduler_class(**A__ )
A__ : Dict = 10
A__ : Union[str, Any] = self.dummy_model()
A__ : Optional[int] = self.dummy_sample_deter.half()
scheduler.set_timesteps(A__ )
for i, t in enumerate(scheduler.timesteps ):
A__ : str = model(A__ , A__ )
A__ : Optional[Any] = scheduler.step(A__ , A__ , A__ ).prev_sample
assert sample.dtype == torch.float16
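# Typical downstream use of the scheduler under test: swap it into a diffusers
# pipeline via from_config (the checkpoint id is illustrative):
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]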
| 64 |
class _a :
'''simple docstring'''
def __init__( self ):
A__ : str = """"""
A__ : Any = """"""
A__ : List[Any] = []
def __A ( self , A__ , A__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.wordb[n]:
A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : List[Any] = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
def __A ( self , A__ , A__ ):
A__ : Tuple = worda
A__ : Dict = wordb
A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )
def __A ( self , A__ , A__ ):
A__ : Optional[Any] = worda
A__ : Dict = wordb
A__ : Union[str, Any] = len(A__ )
A__ : List[str] = len(A__ )
A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Tuple = j
elif j == 0: # second string is empty
A__ : Dict = i
elif worda[i - 1] == wordb[j - 1]: # last characters are equal
A__ : str = self.dp[i - 1][j - 1]
else:
A__ : Union[str, Any] = self.dp[i][j - 1]
A__ : str = self.dp[i - 1][j]
A__ : Union[str, Any] = self.dp[i - 1][j - 1]
A__ : Tuple = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
if __name__ == "__main__":
A_ : Union[str, Any] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A_ : int = input('Enter the first string: ').strip()
A_ : List[str] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
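# A compact, runnable formulation of the same top-down recurrence, using
# functools.lru_cache instead of a hand-rolled dp table:
from functools import lru_cache

def min_dist(word1: str, word2: str) -> int:
    @lru_cache(maxsize=None)
    def dp(m: int, n: int) -> int:
        if m == 0:  # first prefix empty: insert the remaining n characters
            return n
        if n == 0:  # second prefix empty: delete the remaining m characters
            return m
        if word1[m - 1] == word2[n - 1]:
            return dp(m - 1, n - 1)
        # insert, delete, replace
        return 1 + min(dp(m, n - 1), dp(m - 1, n), dp(m - 1, n - 1))

    return dp(len(word1), len(word2))

assert min_dist("intention", "execution") == 5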
| 64 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Union[str, Any] ) -> Union[str, Any]:
A__ : Optional[Any] = XCLIPTextConfig()
# derive patch size from model name
A__ : Any = model_name.find("""patch""" )
A__ : List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
A__ : List[Any] = XCLIPVisionConfig(patch_size=lowercase_ , num_frames=lowercase_ )
if "large" in model_name:
A__ : str = 768
A__ : str = 3072
A__ : Any = 12
A__ : Tuple = 1024
A__ : str = 4096
A__ : Any = 16
A__ : List[Any] = 24
A__ : Optional[Any] = 768
A__ : Any = 3072
if model_name == "xclip-large-patch14-16-frames":
A__ : List[Any] = 336
A__ : List[str] = XCLIPConfig.from_text_vision_configs(lowercase_ , lowercase_ )
if "large" in model_name:
A__ : Dict = 768
return config
def UpperCamelCase (lowercase_: Dict ) -> Dict:
# text encoder
if name == "token_embedding.weight":
A__ : Dict = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
A__ : Union[str, Any] = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
A__ : Optional[Any] = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
A__ : Optional[int] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
A__ : Optional[Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
A__ : str = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
A__ : Dict = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
A__ : Optional[Any] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
A__ : List[Any] = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
A__ : List[str] = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
A__ : Union[str, Any] = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
A__ : Any = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
A__ : str = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
A__ : List[str] = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
A__ : str = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
A__ : Dict = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
A__ : Any = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
A__ : str = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
A__ : List[Any] = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
A__ : List[str] = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
A__ : Union[str, Any] = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
A__ : List[Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def UpperCamelCase (lowercase_: str , lowercase_: int ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
A__ : Dict = orig_state_dict.pop(lowercase_ )
if "attn.in_proj" in key:
A__ : Union[str, Any] = key.split(""".""" )
if key.startswith("""visual""" ):
A__ : Any = key_split[3]
A__ : Optional[int] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
A__ : List[Any] = val[
:dim, :
]
A__ : Optional[Any] = val[
dim : dim * 2, :
]
A__ : Tuple = val[
-dim:, :
]
else:
A__ : List[str] = val[
:dim
]
A__ : Union[str, Any] = val[
dim : dim * 2
]
A__ : List[Any] = val[
-dim:
]
else:
if "weight" in key:
A__ : List[Any] = val[
:dim, :
]
A__ : Union[str, Any] = val[
dim : dim * 2, :
]
A__ : List[str] = val[
-dim:, :
]
else:
A__ : Optional[Any] = val[:dim]
A__ : Dict = val[
dim : dim * 2
]
A__ : str = val[-dim:]
elif key.startswith("""mit""" ):
A__ : Optional[Any] = key_split[2]
A__ : str = config.vision_config.mit_hidden_size
if "weight" in key:
A__ : Dict = val[:dim, :]
A__ : int = val[dim : dim * 2, :]
A__ : List[str] = val[-dim:, :]
else:
A__ : List[str] = val[:dim]
A__ : List[Any] = val[dim : dim * 2]
A__ : Optional[Any] = val[-dim:]
else:
A__ : Optional[int] = key_split[2]
A__ : Dict = config.text_config.hidden_size
if "weight" in key:
A__ : int = val[:dim, :]
A__ : Any = val[
dim : dim * 2, :
]
A__ : List[str] = val[-dim:, :]
else:
A__ : Any = val[:dim]
A__ : Union[str, Any] = val[
dim : dim * 2
]
A__ : str = val[-dim:]
else:
A__ : List[Any] = rename_key(lowercase_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
A__ : List[Any] = val.T
A__ : List[Any] = val
return orig_state_dict
def UpperCamelCase (lowercase_: Optional[int] ) -> List[str]:
if num_frames == 8:
A__ : Tuple = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
A__ : Tuple = """eating_spaghetti.npy"""
elif num_frames == 32:
A__ : Optional[int] = """eating_spaghetti_32_frames.npy"""
A__ : Any = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowercase_ , repo_type="""dataset""" , )
A__ : str = np.load(lowercase_ )
return list(lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict=None , lowercase_: Optional[Any]=False ) -> Union[str, Any]:
A__ : int = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
A__ : List[Any] = model_to_url[model_name]
A__ : List[str] = 8
if "16-frames" in model_name:
A__ : Any = 16
elif "shot" in model_name:
A__ : Dict = 32
A__ : Any = get_xclip_config(lowercase_ , lowercase_ )
A__ : str = XCLIPModel(lowercase_ )
model.eval()
if "drive" in checkpoint_url:
A__ : Any = """pytorch_model.bin"""
gdown.cached_download(lowercase_ , lowercase_ , quiet=lowercase_ )
A__ : int = torch.load(lowercase_ , map_location="""cpu""" )["""model"""]
else:
A__ : Dict = torch.hub.load_state_dict_from_url(lowercase_ )["""model"""]
A__ : List[Any] = convert_state_dict(lowercase_ , lowercase_ )
A__ : Tuple = XCLIPModel(lowercase_ )
A__ , A__ : List[Any] = model.load_state_dict(lowercase_ , strict=lowercase_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
A__ : List[Any] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
A__ : Optional[int] = VideoMAEImageProcessor(size=lowercase_ )
A__ : Union[str, Any] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
A__ : Tuple = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
A__ : Dict = XCLIPProcessor(image_processor=lowercase_ , tokenizer=lowercase_ )
A__ : str = prepare_video(lowercase_ )
A__ : Tuple = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowercase_ , return_tensors="""pt""" , padding=lowercase_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
A__ : List[str] = model(**lowercase_ )
# Verify outputs
A__ : int = outputs.logits_per_video
A__ : str = logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowercase_ )
# kinetics-400
if model_name == "xclip-base-patch32":
A__ : Optional[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
A__ : Union[str, Any] = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] )
elif model_name == "xclip-base-patch16":
A__ : Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
A__ : Union[str, Any] = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] )
elif model_name == "xclip-large-patch14":
A__ : Any = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
A__ : List[Any] = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
A__ : Tuple = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
A__ : Tuple = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
A__ : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
A__ : List[Any] = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
A__ : Optional[int] = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
A__ : Union[str, Any] = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
A__ : Any = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
A__ : Any = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
A__ : Optional[int] = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
A__ : Union[str, Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
A__ : List[Any] = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
A__ : Union[str, Any] = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowercase_ , organization="""nielsr""" )
processor.push_to_hub(lowercase_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowercase_ , organization="""nielsr""" )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A_ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
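# Inference sketch with an already-converted checkpoint
# (microsoft/xclip-base-patch32 on the Hub); the 8 random frames only
# exercise the expected shapes:
import numpy as np
import torch

from transformers import XCLIPModel, XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")

video = list(np.random.randint(0, 255, (8, 224, 224, 3), dtype=np.uint8))
inputs = processor(
    text=["playing sports", "eating spaghetti"], videos=video, return_tensors="pt", padding=True
)
with torch.no_grad():
    probs = model(**inputs).logits_per_video.softmax(dim=1)
print(probs)  # one probability per candidate text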
| 64 |
def UpperCamelCase (lowercase_: int , lowercase_: int ) -> int:
while second != 0:
A__ : int = first & second
first ^= second
A__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[Any] = int(input('Enter the first number: ').strip())
A_ : List[str] = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''')
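# De-obfuscated sketch of the carry-propagation idea above (valid for
# non-negative ints; with Python's unbounded negative ints the loop would
# not terminate):
def add(first: int, second: int) -> int:
    while second != 0:
        carry = first & second  # bit positions that overflow
        first ^= second         # bitwise sum without the carries
        second = carry << 1     # carries shift one position left
    return first

assert add(3, 5) == 8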
| 64 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
A_ : Any = '__DUMMY_TRANSFORMERS_USER__'
A_ : Tuple = 'Dummy User'
A_ : Any = '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'
A_ : str = 'https://hub-ci.huggingface.co'
A_ : Any = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
A_ : List[str] = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
A_ : Any = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def UpperCamelCase (lowercase_: Union[str, Any] ) -> List[str]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowercase_ )
@pytest.fixture
def UpperCamelCase (lowercase_: Union[str, Any] ) -> List[str]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowercase_ )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowercase_ )
@pytest.fixture
def UpperCamelCase (lowercase_: Dict ) -> List[str]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowercase_ )
@pytest.fixture
def UpperCamelCase (lowercase_: Dict , lowercase_: str ) -> Tuple:
HfFolder.save_token(lowercase_ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def UpperCamelCase () -> str:
return HfApi(endpoint=lowercase_ )
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi ) -> Optional[int]:
A__ : Optional[int] = HfFolder.get_token()
HfFolder.save_token(lowercase_ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowercase_ )
@pytest.fixture
def UpperCamelCase (lowercase_: Any ) -> int:
def _cleanup_repo(lowercase_: List[str] ):
hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def UpperCamelCase (lowercase_: str ) -> Dict:
@contextmanager
def _temporary_repo(lowercase_: Tuple ):
try:
yield repo_id
finally:
cleanup_repo(lowercase_ )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi , lowercase_: Optional[Any] , lowercase_: List[str] ) -> Optional[int]:
A__ : Dict = f"""repo_txt_data-{int(time.time() * 10E3 )}"""
A__ : Optional[int] = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" , private=lowercase_ )
hf_api.upload_file(
token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo="""data/text_data.txt""" , repo_id=lowercase_ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Union[str, Any] , lowercase_: int ) -> int:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi , lowercase_: List[str] , lowercase_: int ) -> int:
A__ : Optional[int] = f"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
A__ : str = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" , private=lowercase_ )
hf_api.upload_file(
token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo="""data.zip""" , repo_id=lowercase_ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[Any] , lowercase_: Dict ) -> List[Any]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def UpperCamelCase (lowercase_: HfApi , lowercase_: List[Any] , lowercase_: List[str] ) -> int:
A__ : Dict = f"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
A__ : Tuple = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" , private=lowercase_ )
hf_api.upload_file(
token=lowercase_ , path_or_fileobj=str(lowercase_ ) , path_in_repo="""data.zip""" , repo_id=lowercase_ , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowercase_ , token=lowercase_ , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase (lowercase_: List[str] , lowercase_: List[str] , lowercase_: Any ) -> Optional[int]:
return hf_private_dataset_repo_zipped_img_data_
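# Minimal sketch of the monkeypatch-based fixture pattern used above,
# runnable under pytest on its own (the setattr is undone after each test):
@pytest.fixture
def ci_hub_endpoint(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    yield CI_HUB_ENDPOINT

def test_endpoint_is_patched(ci_hub_endpoint):
    import datasets.config

    assert datasets.config.HF_ENDPOINT == ci_hub_endpoint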
| 64 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (lowercase_: Matrix , lowercase_: Matrix ) -> Matrix:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase_ )]
A__ : int
A__ : int
A__ : int
A__ : int
A__ : int
A__ : float
for row in range(lowercase_ ):
for col in range(lowercase_ ):
A__ : List[str] = matrix[row][col]
A__ : int = vector[row][0]
A__ : Optional[int] = 0
A__ : str = 0
while row < size and col < size:
# pivoting
A__ : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase_ , lowercase_ ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A__ , A__ : Union[str, Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase_ ):
A__ : List[Any] = augmented[rowa][col] / augmented[row][col]
A__ : Dict = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase_ ):
for row in range(lowercase_ ):
A__ : List[str] = augmented[row][col] / augmented[col][col]
for cola in range(lowercase_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase_ )
]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(lowercase_ )] for _ in range(lowercase_ )]
A__ : Matrix = [[0] for _ in range(lowercase_ )]
A__ : Matrix
A__ : int
A__ : int
A__ : int
for x_val, y_val in enumerate(lowercase_ ):
for col in range(lowercase_ ):
A__ : Dict = (x_val + 1) ** (size - col - 1)
A__ : Any = y_val
A__ : Union[str, Any] = solve(lowercase_ , lowercase_ )
def interpolated_func(lowercase_: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase_ ) )
return interpolated_func
def UpperCamelCase (lowercase_: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCamelCase (lowercase_: Callable[[int], int] = question_function , lowercase_: int = 10 ) -> int:
A__ : list[int] = [func(lowercase_ ) for x_val in range(1 , order + 1 )]
A__ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
A__ : int = 0
A__ : Callable[[int], int]
A__ : int
for poly in polynomials:
A__ : List[str] = 1
while func(lowercase_ ) == poly(lowercase_ ):
x_val += 1
ret += poly(lowercase_ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
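# Quick sanity check of the interpolate() helper above (assuming it keeps its
# original name) against the cube sequence from the Project Euler 101
# statement: the optimum quadratic through the first three cubes predicts 58
# at n=4, not 4**3 = 64.
fit = interpolate([1, 8, 27])
assert fit(1) == 1 and fit(2) == 8 and fit(3) == 27
assert fit(4) == 58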
| 64 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Dict = StableUnCLIPPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__: Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__: str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase__: Union[str, Any] = False
def __A ( self ):
A__ : Dict = 32
A__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
A__ : int = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A__ , projection_dim=A__ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A__ : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=A__ , num_layers=1 , )
torch.manual_seed(0 )
A__ : Optional[int] = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=A__ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
A__ : Dict = StableUnCLIPImageNormalizer(embedding_dim=A__ )
A__ : int = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
A__ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
A__ : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A__ : Union[str, Any] = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A__ , layers_per_block=1 , upcast_attention=A__ , use_linear_projection=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=A__ , steps_offset=1 , )
torch.manual_seed(0 )
A__ : List[Any] = AutoencoderKL()
A__ : Union[str, Any] = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Optional[int] = torch.manual_seed(A__ )
else:
A__ : int = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
A__ : int = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=A__ )
def __A ( self ):
A__ : str = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=A__ )
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
A__ : Optional[Any] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : List[str] = pipe("""anime turle""" , generator=A__ , output_type="""np""" )
A__ : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A__ , A__ )
def __A ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ : Optional[int] = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.float16 )
A__ : List[Any] = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ : Any = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
A__ : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
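# The same memory-saving pattern outside the test harness (checkpoint id
# taken from the tests above):
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.enable_attention_slicing()       # trade speed for lower attention memory
pipe.enable_sequential_cpu_offload()  # keep only the active module on the GPU
image = pipe("anime turtle", output_type="np").images[0]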
| 64 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (lowercase_: int ) -> int:
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
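# Repeated calls are served from the cache; functools exposes the statistics:
factorial(10)
print(factorial.cache_info())  # 10 misses on the first computation
factorial(10)
print(factorial.cache_info())  # the second call adds a single hit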
| 64 | 1 |
import unittest
from knapsack import greedy_knapsack as kp
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Optional[int] = [10, 20, 30, 40, 50, 60]
A__ : str = [2, 4, 6, 8, 10, 12]
A__ : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(A__ , A__ , A__ ) , 210 )
def __A ( self ):
self.assertRaisesRegex(A__ , """max_weight must greater than zero.""" )
def __A ( self ):
self.assertRaisesRegex(A__ , """Weight can not be negative.""" )
def __A ( self ):
self.assertRaisesRegex(A__ , """Profit can not be negative.""" )
def __A ( self ):
self.assertRaisesRegex(A__ , """max_weight must greater than zero.""" )
def __A ( self ):
self.assertRaisesRegex(
A__ , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
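# One plausible greedy (fractional-knapsack) implementation consistent with
# the assertions above; the error messages are copied verbatim from the test,
# including its grammar:
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Take items by decreasing profit/weight ratio, splitting the last one.
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, capacity = 0.0, max_weight
    for i in order:
        if weight[i] <= capacity:
            capacity -= weight[i]
            total += profit[i]
        else:
            total += profit[i] * capacity / weight[i]
            break
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210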
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
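# Two minimal Apache Beam based builders (flat and nested features) used below to
# exercise datasets' download_and_prepare pipeline with the DirectRunner.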
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def UpperCamelCase () -> Tuple:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class _a (__magic_name__ ):
'''simple docstring'''
@require_beam
def __A ( self ):
A__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : int = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
import apache_beam as beam
A__ : int = beam.io.parquetio.WriteToParquet
A__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
A__ : Optional[Any] = partial(A__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __A ( self ):
A__ : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 64 | 1 |
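# Heap's algorithm: yields every permutation of the input list, deriving each new
# permutation from the previous one with a single element swap.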
def UpperCamelCase (lowercase_: list ) -> list:
if len(lowercase_ ) <= 1:
return [tuple(lowercase_ )]
A__ : Dict = []
def generate(lowercase_: int , lowercase_: list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowercase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A__ , A__ : Optional[int] = arr[k - 1], arr[i]
else: # k is odd
A__ , A__ : List[str] = arr[k - 1], arr[0]
generate(k - 1 , lowercase_ )
generate(len(lowercase_ ) , lowercase_ )
return res
if __name__ == "__main__":
A_ : int = input('Enter numbers separated by a comma:\n').strip()
A_ : Any = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , A__ , )
super().__init__(*A__ , **A__ )
| 64 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
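# Task template that maps a dataset's text and label columns onto the canonical
# "text-classification" schema, checking that the label column is a ClassLabel.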
@dataclass(frozen=__magic_name__ )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCAmelCase__: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
UpperCAmelCase__: ClassVar[Features] = Features({'''labels''': ClassLabel} )
UpperCAmelCase__: str = "text"
UpperCAmelCase__: str = "labels"
def __A ( self , A__ ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , A__ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
A__ : Optional[Any] = copy.deepcopy(self )
A__ : Tuple = self.label_schema.copy()
A__ : List[Any] = features[self.label_column]
A__ : str = label_schema
return task_template
@property
def __A ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
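# The helpers below fit a one-parameter linear model y = a*x + b on noisy data so
# the checkpointing tests can compare model/optimizer/scheduler state across saves.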
def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int:
def get_dataset(lowercase_: Optional[int] ):
A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
A__ : Dict = get_dataset(lowercase_ )
A__ : Any = get_dataset(lowercase_ )
A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]:
A__ : List[Any] = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
A__ , A__ : Any = batch
A__ : Any = model(lowercase_ )
A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
# Save 11 states (total_limit=2 keeps only the two most recent):
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 64 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: float
UpperCAmelCase__: TreeNode | None = None
UpperCAmelCase__: TreeNode | None = None
def UpperCamelCase (lowercase_: TreeNode | None ) -> bool:
# Validation
def is_valid_tree(lowercase_: TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(lowercase_ , lowercase_ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(lowercase_ ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
lowercase_: TreeNode | None , lowercase_: float , lowercase_: float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , lowercase_ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , lowercase_ )
)
return is_binary_search_tree_recursive_check(lowercase_ , -float("""inf""" ) , float("""inf""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
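# String abbreviation check: dp[i][j] is True when the first i characters of a can
# be turned into the first j characters of b by uppercasing some lowercase letters
# and deleting the remaining lowercase ones.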
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bool:
A__ : Union[str, Any] = len(lowercase_ )
A__ : List[Any] = len(lowercase_ )
A__ : List[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
A__ : str = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A__ : int = True
if a[i].islower():
A__ : Dict = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
def UpperCamelCase (lowercase_: int ) -> "list[int]":
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
A__ : str = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A__ : Dict = 1
if upper_limit > 0:
A__ : str = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
A_ : List[Any] = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
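# Generates a nested list of random floats with the given 2-D shape, used as fake
# raw audio throughout the feature extraction tests.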
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple=1.0 , lowercase_: Dict=None , lowercase_: int=None ) -> str:
if rng is None:
A__ : Optional[Any] = global_rng
A__ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = ASTFeatureExtractor
def __A ( self ):
A__ : Optional[Any] = ASTFeatureExtractionTester(self )
def __A ( self ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[str] = np.asarray(A__ )
A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
@require_torch
def __A ( self ):
import torch
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self , A__ ):
from datasets import load_dataset
A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def __A ( self ):
# fmt: off
A__ : Optional[Any] = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Tuple = ASTFeatureExtractor()
A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 1 |
import math
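# All-pairs shortest paths via Floyd-Warshall: dp[i][j] is relaxed through every
# intermediate vertex k, giving O(n^3) time overall.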
class _a :
'''simple docstring'''
def __init__( self , A__=0 ): # a graph with Node 0,1,...,N-1
A__ : int = n
A__ : Union[str, Any] = [
[math.inf for j in range(0 , A__ )] for i in range(0 , A__ )
] # adjacency matrix for weight
A__ : str = [
[math.inf for j in range(0 , A__ )] for i in range(0 , A__ )
] # dp[i][j] stores minimum distance from i to j
def __A ( self , A__ , A__ , A__ ):
A__ : str = w
def __A ( self ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def __A ( self , A__ , A__ ):
return self.dp[u][v]
if __name__ == "__main__":
A_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
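# GPT-2 based caption decoder: learned "prefix" embeddings (e.g. projected CLIP
# features) are prepended to the token embeddings, and captions are decoded with
# beam search in generate_beam below.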
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
super().__init__()
A__ : Union[str, Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
A__ : str = prefix_inner_dim
A__ : Optional[Any] = prefix_hidden_dim
A__ : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ : int = (
nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A__ : Tuple = GPTaConfig(
vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
A__ : int = GPTaLMHeadModel(A__ )
def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
A__ : List[str] = self.transformer.transformer.wte(A__ )
A__ : int = self.encode_prefix(A__ )
A__ : int = self.decode_prefix(A__ )
A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __A ( self , A__ , A__ ):
return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
def __A ( self , A__ ):
return self.encode_prefix(A__ )
@torch.no_grad()
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
A__ : Optional[int] = []
A__ : str = []
for feature in features:
A__ : Dict = self.decode_prefix(feature.to(A__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Union[str, Any] = self.generate_beam(
input_embeds=A__ , device=A__ , eos_token_id=A__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : int = torch.stack(A__ )
A__ : List[Any] = torch.stack(A__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
A__ : Any = eos_token_id
A__ : Any = None
A__ : Optional[int] = None
A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Dict = input_embeds
else:
A__ : str = self.transformer.transformer.wte(A__ )
for i in range(A__ ):
A__ : Dict = self.transformer(inputs_embeds=A__ )
A__ : str = outputs.logits
A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Any = logits.softmax(-1 ).log()
if scores is None:
A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : Optional[Any] = next_tokens
else:
A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
else:
A__ : Optional[int] = -float(np.inf )
A__ : List[Any] = 0
A__ : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Dict = scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
A__ : Tuple = next_tokens // scores_sum.shape[1]
A__ : Optional[Any] = seq_lengths[next_tokens_source]
A__ : List[str] = next_tokens % scores_sum.shape[1]
A__ : Optional[int] = next_tokens.unsqueeze(1 )
A__ : int = tokens[next_tokens_source]
A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
A__ : str = generated[next_tokens_source]
A__ : Optional[Any] = scores_sum_average * seq_lengths
A__ : Union[str, Any] = is_stopped[next_tokens_source]
A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
if is_stopped.all():
break
A__ : Dict = scores / seq_lengths
A__ : Dict = scores.argsort(descending=A__ )
# tokens tensors are already padded to max_seq_length
A__ : Union[str, Any] = [tokens[i] for i in order]
A__ : Any = torch.stack(A__ , dim=0 )
A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
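# Arrow-backed JSON builder: reads newline-delimited JSON in resizable chunks via
# pyarrow, or falls back to parsing a whole JSON document (optionally one field).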
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__: Optional[datasets.Features] = None
UpperCAmelCase__: str = "utf-8"
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: bool = True # deprecated
UpperCAmelCase__: Optional[int] = None # deprecated
UpperCAmelCase__: int = 10 << 20 # 10MB
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__: List[str] = JsonConfig
def __A ( self ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
A__ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
A__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
A__ : Optional[Any] = data_files
if isinstance(A__ , A__ ):
A__ : List[str] = [files]
A__ : int = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
A__ : Optional[int] = [files]
A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A__ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , A__ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
# We keep only the field we are interested in
A__ : Optional[int] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
A__ : Any = dataset
A__ : Any = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , """rb""" ) as f:
A__ : List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
A__ : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
A__ : Dict = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
try:
while True:
try:
A__ : str = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
A__ : str = set().union(*[row.keys() for row in dataset] )
A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
A__ : int = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1
| 64 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
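# Tokenizer tests for Pegasus and its BigBird-Pegasus variant: special-token
# handling, parity between the slow and fast tokenizers, and seq2seq batch shapes.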
A_ : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = PegasusTokenizer
UpperCAmelCase__: Optional[int] = PegasusTokenizerFast
UpperCAmelCase__: Optional[Any] = True
UpperCAmelCase__: Any = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A__ : List[str] = PegasusTokenizer(A__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self ):
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def __A ( self , **A__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A__ )
def __A ( self , A__ ):
return ("This is a test", "This is a test")
def __A ( self ):
A__ : Dict = """</s>"""
A__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__ ) , A__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__ ) , A__ )
def __A ( self ):
A__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A__ ) , 1103 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __A ( self ):
A__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
A__ : int = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
A__ : Any = rust_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
A__ : Any = py_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
def __A ( self ):
A__ : Tuple = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
A__ : List[str] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
A__ : int = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
A__ : Tuple = tokenizer([raw_input_str] , return_tensors=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
def __A ( self ):
A__ : Optional[Any] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
A__ : str = """To ensure a smooth flow of bank resolutions."""
A__ : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
A__ : int = tokenizer([raw_input_str] , return_tensors=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __A ( self ):
A__ : List[str] = ["""This is going to be way too long.""" * 150, """short example"""]
A__ : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
A__ : Dict = self._large_tokenizer(A__ , padding=A__ , truncation=A__ , return_tensors="""pt""" )
A__ : List[Any] = self._large_tokenizer(
text_target=A__ , max_length=5 , padding=A__ , truncation=A__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A__ ) == 2 # input_ids, attention_mask.
@slow
def __A ( self ):
# fmt: off
A__ : Any = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = PegasusTokenizer
UpperCAmelCase__: List[Any] = PegasusTokenizerFast
UpperCAmelCase__: int = True
UpperCAmelCase__: Optional[Any] = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A__ : Optional[int] = PegasusTokenizer(A__ , offset=0 , mask_token_sent=A__ , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self ):
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def __A ( self , **A__ ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A__ )
def __A ( self , A__ ):
return ("This is a test", "This is a test")
def __A ( self ):
A__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A__ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
A__ : Optional[int] = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
A__ : List[Any] = rust_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
A__ : List[str] = py_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
@require_torch
def __A ( self ):
A__ : int = ["""This is going to be way too long.""" * 1000, """short example"""]
A__ : Any = ["""not super long but more than 5 tokens""", """tiny"""]
A__ : Dict = self._large_tokenizer(A__ , padding=A__ , truncation=A__ , return_tensors="""pt""" )
A__ : Any = self._large_tokenizer(
text_target=A__ , max_length=5 , padding=A__ , truncation=A__ , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A__ ) == 2 # input_ids, attention_mask.
def __A ( self ):
A__ : Dict = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
A__ : List[Any] = self._large_tokenizer(A__ ).input_ids
self.assertListEqual(
A__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 64 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
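# Flag file names that break repository conventions: uppercase letters, spaces,
# hyphens, or files sitting outside any directory.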
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
A_ : Any = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
A_ : List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
A_ : Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 64 | 1 |
from __future__ import annotations
from typing import Any
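# Enumerates all subsequences of a list with a depth-first include/exclude
# decision at each index.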
def UpperCamelCase (lowercase_: list[Any] ) -> None:
create_state_space_tree(lowercase_ , [] , 0 )
def UpperCamelCase (lowercase_: list[Any] , lowercase_: list[Any] , lowercase_: int ) -> None:
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_ , lowercase_ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
A_ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 64 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
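# Deprecation helper: warns when a deprecated argument or attribute is used and
# raises once the library version reaches the announced removal version.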
def UpperCamelCase (*lowercase_: Optional[int] , lowercase_: Optional[Union[Dict, Any]] = None , lowercase_: Dict=True , lowercase_: Tuple=2 ) -> Dict:
from .. import __version__
A__ : Dict = take_from
A__ : str = ()
if not isinstance(args[0] , lowercase_ ):
A__ : int = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowercase_ ).base_version ) >= version.parse(lowercase_ ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
A__ : Any = None
if isinstance(lowercase_ , lowercase_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowercase_ ),)
A__ : List[str] = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(lowercase_ , lowercase_ ):
values += (getattr(lowercase_ , lowercase_ ),)
A__ : Optional[Any] = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
A__ : int = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
A__ : int = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , lowercase_ , stacklevel=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) > 0:
A__ : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
A__ : Optional[Any] = call_frame.filename
A__ : Optional[int] = call_frame.lineno
A__ : Any = call_frame.function
A__ , A__ : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(lowercase_ ) == 0:
return
elif len(lowercase_ ) == 1:
return values[0]
return values
| 64 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
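# Combined CLIP processor: routes text through the tokenizer and images through
# the image processor, merging both outputs into a single BatchEncoding.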
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Any = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__: Tuple = '''CLIPImageProcessor'''
UpperCAmelCase__: Union[str, Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , A__=None , A__=None , **A__ ):
A__ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A__ , )
A__ : Any = kwargs.pop("""feature_extractor""" )
A__ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A__ , A__ )
def __call__( self , A__=None , A__=None , A__=None , **A__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
A__ : Tuple = self.tokenizer(A__ , return_tensors=A__ , **A__ )
if images is not None:
A__ : List[Any] = self.image_processor(A__ , return_tensors=A__ , **A__ )
if text is not None and images is not None:
A__ : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ )
def __A ( self , *A__ , **A__ ):
return self.tokenizer.batch_decode(*A__ , **A__ )
def __A ( self , *A__ , **A__ ):
return self.tokenizer.decode(*A__ , **A__ )
@property
def __A ( self ):
A__ : Optional[int] = self.tokenizer.model_input_names
A__ : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , )
return self.image_processor_class
@property
def __A ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , A__ , )
return self.image_processor
| 64 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
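# Maps Flax parameter key tuples to their PyTorch names and reshapes kernels
# (permute for expert layers, transpose for linear layers) to match torch layouts.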
def UpperCamelCase (lowercase_: List[str] , lowercase_: str ) -> Optional[Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : Optional[int] = torch.permute(lowercase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase_ ):
# linear layer
A__ : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A__ : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def UpperCamelCase (lowercase_: Tuple , lowercase_: Optional[int] , lowercase_: str ) -> Union[str, Any]:
if "metadata" in layer:
A__ : Tuple = layer.split("""metadata""" )
A__ : Optional[Any] = """""".join(split_layer[0] )[:-1]
A__ : Optional[Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
A__ : str = layer.split("""kvstore""" )
A__ : int = """""".join(split_layer[0] )[:-1]
A__ : Optional[int] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
A__ : Any = layer.split("""/""" )
A__ : int = """/""".join(split_layer[:-1] )
A__ : str = (split_layer[-1],)
if "kvstore/path" in layer:
A__ : Dict = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
A__ : Optional[int] = """file"""
else:
A__ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCamelCase (lowercase_: str , lowercase_: List[Any] ) -> int:
A__ : int = rename_keys(lowercase_ )
A__ : Any = {}
for k, v in current_block.items():
A__ : Dict = v
A__ : str = new_current_block
torch.save(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
A__ : List[Any] = []
A__ : int = {}
A__ : List[str] = 0
A__ : Any = 0
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
A__ : Any = {}
for layer in checkpoint_info.keys():
A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
lowercase_ , lowercase_ , lowercase_ )
if curr_real_layer_name in all_layers:
A__ : Optional[int] = content
else:
A__ : List[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A__ : List[Any] = torch.tensor(lowercase_ )
A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
A__ : Any = """/""".join(lowercase_ )
# If adding this weight would tip the current shard over the maximal size, we split here.
if current_block_size + weight_size > max_shard_size:
A__ : List[Any] = os.path.join(
lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A__ : Any = {}
A__ : str = 0
A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A__ : str = {}
A__ : Any = {}
for idx, shard in enumerate(lowercase_ ):
A__ : Any = weights_name.replace(
""".bin""" , f"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
A__ : str = shard
for key in shard:
A__ : Any = shard_file
# Add the metadata
A__ : Tuple = {"""total_size""": total_size}
A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
f.write(lowercase_ )
return metadata, index
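# Hedged sketch (added for illustration): the shard accounting above sizes each weight
# as numel() * dtype_byte_size(dtype); e.g. a 1000x1000 bfloat16 tensor occupies
# 2,000,000 bytes, so a "10GB" max_shard_size admits roughly 5000 such tensors per shard.
_w = torch.zeros(1000, 1000, dtype=torch.bfloat16)
assert _w.numel() * dtype_byte_size(_w.dtype) == 2_000_000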
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCamelCase () -> int:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A__ : str = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
A__ : str = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
A__ : Tuple = TaTokenizer.from_pretrained("""t5-small""" )
A__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
A__ : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
A__ : Tuple = model.generate(lowercase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 64 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCamelCase (lowercase_: str , lowercase_: str , **lowercase_: str ) -> Optional[int]:
A__ : str = AutoConfig.from_pretrained(lowercase_ , **lowercase_ )
A__ : List[str] = AutoModelForSeqaSeqLM.from_config(lowercase_ )
model.save_pretrained(lowercase_ )
AutoTokenizer.from_pretrained(lowercase_ ).save_pretrained(lowercase_ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
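# Hedged usage note (added): python-fire maps CLI arguments onto the function
# signature -- positional args fill the two string parameters (config name, save dir)
# and --flags feed the **kwargs passed through to AutoConfig.from_pretrained as
# config overrides. Illustrative invocation only (names are examples):
#   python this_script.py t5-small /tmp/tiny-t5 --d_model=64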
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : int = inspect.getfile(accelerate.test_utils )
A__ : str = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
A__ : List[Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
A__ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def __A ( self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
A__ : Optional[int] = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A__ , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
A__ : Any = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A__ , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ):
A__ : List[Any] = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A__ , env=os.environ.copy() )
@require_multi_gpu
def __A ( self ):
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
A__ : Optional[int] = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : Optional[Any] = Accelerator()
A_ : Tuple = (accelerator.state.process_index + 2, 10)
A_ : Any = torch.randint(0, 10, shape).to(accelerator.device)
A_ : Any = ''
A_ : List[str] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
A_ : Tuple = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
A_ : List[str] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 64 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A_ : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCamelCase (lowercase_: Optional[Any] ) -> Optional[int]:
A__ : List[Any] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
A_ : Any = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def UpperCamelCase (lowercase_: str ) -> Any:
A__ : Dict = list(s_dict.keys() )
for key in keys:
A__ : List[str] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
A__ : List[Any] = new_key.replace(lowercase_ , lowercase_ )
print(f"""{key} -> {new_key}""" )
A__ : Tuple = s_dict.pop(lowercase_ )
return s_dict
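# Hedged stand-alone sketch (added for illustration): the rename pass above is plain
# substring replacement driven by the mapping dict; a tiny self-contained version:
def _demo_rename_one_key(key):
    demo_mapping = {"blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2"}
    new_key = key
    for old, new in demo_mapping.items():
        if old in key:
            new_key = new_key.replace(old, new)
    return new_key

assert _demo_rename_one_key("encoder.blocks.0.mlp.0.weight") == "encoder.layers.0.fc1.weight"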
def UpperCamelCase (lowercase_: Tuple ) -> Optional[int]:
A__ , A__ : Any = emb.weight.shape
A__ : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
A__ : Union[str, Any] = emb.weight.data
return lin_layer
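# Hedged stand-alone sketch (added for illustration): the tying pattern used by the
# helper above -- a bias-free Linear whose weight storage is the embedding matrix, so
# the output projection and the input embedding share parameters.
_emb = nn.Embedding(10, 4)
_lin = nn.Linear(4, 10, bias=False)  # weight shape (10, 4) == _emb.weight.shape
_lin.weight.data = _emb.weight.data  # share storage, no copy
assert _lin.weight.data_ptr() == _emb.weight.data_ptr()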
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bytes:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
A__ : Tuple = os.path.basename(lowercase_ )
A__ : int = url.split("""/""" )[-2]
A__ : Dict = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ) and not os.path.isfile(lowercase_ ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowercase_ ):
A__ : Optional[Any] = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowercase_ ) as source, open(lowercase_ , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowercase_ , unit_divisor=1024 ) as loop:
while True:
A__ : Any = source.read(8192 )
if not buffer:
break
output.write(lowercase_ )
loop.update(len(lowercase_ ) )
A__ : Dict = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Tuple ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
A__ : Tuple = _download(_MODELS[checkpoint_path] )
else:
A__ : Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
A__ : str = original_checkpoint["""dims"""]
A__ : List[Any] = original_checkpoint["""model_state_dict"""]
A__ : Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(lowercase_ )
rename_keys(lowercase_ )
A__ : List[str] = True
A__ : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
A__ : List[Any] = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
A__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
A__ , A__ : List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
A__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A__ : str = proj_out_weights
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A_ : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , A__ , A__=768 ):
super().__init__(A__ )
A__ : Tuple = proj_size
A__ : int = CLIPVisionModel(A__ )
A__ : Union[str, Any] = PaintByExampleMapper(A__ )
A__ : Any = nn.LayerNorm(config.hidden_size )
A__ : List[str] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
A__ : str = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def __A ( self , A__ , A__=False ):
A__ : Optional[int] = self.model(pixel_values=A__ )
A__ : Dict = clip_output.pooler_output
A__ : Union[str, Any] = self.mapper(latent_states[:, None] )
A__ : Optional[Any] = self.final_layer_norm(A__ )
A__ : Any = self.proj_out(A__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _a (nn.Module ):
'''simple docstring'''
def __init__( self , A__ ):
super().__init__()
A__ : Optional[int] = (config.num_hidden_layers + 1) // 5
A__ : List[Any] = config.hidden_size
A__ : Optional[int] = 1
A__ : Tuple = nn.ModuleList(
[
BasicTransformerBlock(A__ , A__ , A__ , activation_fn="""gelu""" , attention_bias=A__ )
for _ in range(A__ )
] )
def __A ( self , A__ ):
for block in self.blocks:
A__ : str = block(A__ )
return hidden_states
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 1 |
A_ : Optional[int] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 64 |
def UpperCamelCase (lowercase_: int ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("""Input value must be an 'int' type""" )
A__ : int = 0
while number:
position += 1
number >>= 1
return position
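# Hedged stand-alone check (added for illustration): the loop above computes the index
# of the most significant set bit, i.e. int.bit_length() for positive integers.
def _demo_msb_position(number):
    position = 0
    while number:
        position += 1
        number >>= 1
    return position

assert _demo_msb_position(1) == 1 == (1).bit_length()
assert _demo_msb_position(8) == 4 == (8).bit_length()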
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a :
'''simple docstring'''
def __init__( self , A__ , A__=13 , A__=3 , A__=True , A__=True , A__=0.1 , A__=0.1 , A__=224 , A__=1000 , A__=[3, 3, 6, 4] , A__=[48, 56, 112, 220] , ):
A__ : Tuple = parent
A__ : str = batch_size
A__ : int = num_channels
A__ : Union[str, Any] = is_training
A__ : Tuple = use_labels
A__ : List[str] = hidden_dropout_prob
A__ : Any = attention_probs_dropout_prob
A__ : Dict = num_labels
A__ : List[Any] = image_size
A__ : List[Any] = layer_depths
A__ : str = embed_dims
def __A ( self ):
A__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : int = None
if self.use_labels:
A__ : int = ids_tensor([self.batch_size] , self.num_labels )
A__ : Tuple = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=A__ , layer_scale_init_value=1e-5 , )
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = SwiftFormerModel(config=A__ )
model.to(A__ )
model.eval()
A__ : List[Any] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __A ( self , A__ , A__ , A__ ):
A__ : Tuple = self.num_labels
A__ : Tuple = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
A__ : int = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
A__ : List[str] = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
A__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Optional[int] = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
((A__) , (A__) , (A__)) : Tuple = self.prepare_config_and_inputs()
A__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Dict = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__: List[Any] = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__: Dict = False
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: Any = False
UpperCAmelCase__: int = False
UpperCAmelCase__: Any = False
def __A ( self ):
A__ : List[str] = SwiftFormerModelTester(self )
A__ : int = ConfigTester(
self , config_class=A__ , has_text_modality=A__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def __A ( self ):
pass
def __A ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(A__ )
A__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def __A ( self ):
A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(A__ )
A__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Dict = [*signature.parameters.keys()]
A__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def __A ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def __A ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def __A ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple = SwiftFormerModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def __A ( self ):
pass
def __A ( self ):
def check_hidden_states_output(A__ , A__ , A__ ):
A__ : Any = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
A__ : Any = model(**self._prepare_for_class(A__ , A__ ) )
A__ : str = outputs.hidden_states
A__ : Tuple = 8
self.assertEqual(len(A__ ) , A__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(A__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : List[Any] = True
check_hidden_states_output(A__ , A__ , A__ )
def __A ( self ):
def _config_zero_init(A__ ):
A__ : str = copy.deepcopy(A__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(A__ , A__ , 1e-10 )
if isinstance(getattr(A__ , A__ , A__ ) , A__ ):
A__ : Optional[Any] = _config_zero_init(getattr(A__ , A__ ) )
setattr(A__ , A__ , A__ )
return configs_no_init
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] = _config_zero_init(A__ )
for model_class in self.all_model_classes:
A__ : str = model_class(config=A__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __A ( self ):
pass
def UpperCamelCase () -> Optional[Any]:
A__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _a (unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def __A ( self ):
A__ : List[Any] = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(A__ )
A__ : Any = self.default_image_processor
A__ : List[Any] = prepare_img()
A__ : Optional[Any] = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )
# forward pass
with torch.no_grad():
A__ : Dict = model(**A__ )
# verify the logits
A__ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A__ )
A__ : Any = torch.tensor([[-2.1_703e00, 2.1_107e00, -2.0_811e00]] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )
| 64 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: int , lowercase_: int ) -> np.ndarray:
A__ : Any = cva.getAffineTransform(lowercase_ , lowercase_ )
return cva.warpAffine(lowercase_ , lowercase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
A_ : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A_ , A_ : Optional[Any] = gray_img.shape
# set different points to rotate image
A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
A_ : Dict = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A_ : Union[str, Any] = plt.figure(1)
A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 64 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
A_ : Dict = False
class _a (unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
A__ : str = """A painting of a squirrel eating a burger """
A__ : int = torch.manual_seed(0 )
A__ : int = pipe(
prompt=A__ , generator=A__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A__ )
A__ : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
A__ : List[str] = generator.manual_seed(0 )
A__ : List[Any] = pipe(
prompt=A__ , generator=A__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __A ( self ):
A__ : List[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
"""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
A__ : List[str] = """A painting of a squirrel eating a burger """
A__ : List[str] = torch.manual_seed(0 )
A__ : Optional[int] = pipe(
prompt=A__ , generator=A__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
A__ : Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ : Optional[Any] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 64 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 1 |
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> list:
A__ : List[str] = len(lowercase_ )
A__ : Optional[int] = []
for i in range(len(lowercase_ ) - pat_len + 1 ):
A__ : int = True
for j in range(lowercase_ ):
if s[i + j] != pattern[j]:
A__ : List[str] = False
break
if match_found:
position.append(lowercase_ )
return position
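# Hedged note (added): this brute-force scan costs O(len(s) * len(pattern)) character
# comparisons in the worst case (e.g. s = "AAA...AA", pattern = "AA...AB"); algorithms
# such as KMP or Rabin-Karp bring the cost down to about O(len(s) + len(pattern)).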
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
A__ : List[Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int ) -> List[str]:
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
A__ : Tuple = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self , A__ , A__ , A__ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , A__ , A__ , A__ )
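# Hedged usage sketch (added): with IGNORE_RESULT registered above, a doctest can opt
# out of output comparison for a single statement via the matching directive, e.g.:
#   >>> print("some noisy output")  # doctest: +IGNORE_RESULT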
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 1 |
import os
def UpperCamelCase () -> Union[str, Any]:
A__ : Dict = os.path.dirname(os.path.realpath(lowercase_ ) )
A__ : Union[str, Any] = os.path.join(lowercase_ , """triangle.txt""" )
with open(lowercase_ ) as f:
A__ : List[Any] = f.readlines()
A__ : Optional[Any] = []
for line in triangle:
A__ : Optional[Any] = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(lowercase_ ) )
a.append(lowercase_ )
for i in range(1 , len(lowercase_ ) ):
for j in range(len(a[i] ) ):
A__ : int = a[i - 1][j] if j != len(a[i - 1] ) else 0
A__ : Optional[int] = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowercase_ , lowercase_ )
return max(a[-1] )
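# Hedged stand-alone sketch (added for illustration): the same accumulation on a tiny
# in-memory triangle -- each cell adds the best of its (up to) two parents, and the
# answer is the maximum of the last row.
def _demo_max_path_sum():
    a = [[3], [7, 4], [2, 4, 6]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            above_right = a[i - 1][j] if j != len(a[i - 1]) else 0
            above_left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(above_right, above_left)
    return max(a[-1])

assert _demo_max_path_sum() == 14  # path 3 -> 7 -> 4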
if __name__ == "__main__":
print(solution())
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
| 64 | 1 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
A_ : Dict = input('Enter image url: ').strip()
print(f'''Downloading image from {url} ...''')
A_ : Any = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
A_ : Dict = soup.find('meta', {'property': 'og:image'})['content']
A_ : int = requests.get(image_url).content
A_ : Dict = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
| 64 |
class _a :
'''simple docstring'''
def __init__( self ):
A__ : str = """"""
A__ : Any = """"""
A__ : List[Any] = []
def __A ( self , A__ , A__ ):
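        # Memoised top-down recursion over prefix-end indices m and n.
        # Base cases: once one word is exhausted (index -1), the remaining
        # characters of the other word must all be inserted.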
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : List[Any] = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
def __A ( self , A__ , A__ ):
A__ : Tuple = worda
A__ : Dict = worda
A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )
def __A ( self , A__ , A__ ):
A__ : Optional[Any] = worda
A__ : Dict = worda
A__ : Union[str, Any] = len(A__ )
A__ : List[str] = len(A__ )
A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
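        # Bottom-up table: dp[i][j] holds the edit distance between the first
        # i characters of the first word and the first j characters of the second.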
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Tuple = j
elif j == 0: # second string is empty
A__ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A__ : str = self.dp[i - 1][j - 1]
else:
A__ : Union[str, Any] = self.dp[i][j - 1]
A__ : str = self.dp[i - 1][j]
A__ : Union[str, Any] = self.dp[i - 1][j - 1]
A__ : Tuple = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
if __name__ == "__main__":
A_ : Union[str, Any] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A_ : int = input('Enter the first string: ').strip()
A_ : List[str] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
A_ : Tuple = 'facebook/wmt19-en-de'
A_ : int = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
A_ : Dict = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
A_ : List[str] = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
A_ : int = tokenizer(['Making tiny model'], return_tensors='pt')
A_ : Optional[int] = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
A_ : Optional[Any] = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 64 |
def UpperCamelCase (lowercase_: int , lowercase_: int ) -> int:
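    # Bitwise addition: AND extracts the carry bits, XOR adds without carry,
    # and the carry is shifted left and folded back in until it is zero.
    # e.g. 3 + 5: carry (3 & 5) << 1 = 2, partial sum 3 ^ 5 = 6, and the loop
    # eventually settles at 8.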
while second != 0:
A__ : int = first & second
first ^= second
A__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[Any] = int(input('Enter the first number: ').strip())
A_ : List[str] = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''')
| 64 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
A_ : Optional[Any] = True
from torch.cuda.amp import autocast
A_ : int = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Union[str, Any]=None , lowercase_: Tuple=None ) -> Union[str, Any]:
return field(default_factory=lambda: default , metadata=lowercase_ )
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__: Optional[str] = field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase__: Optional[bool] = field(
default=__magic_name__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCAmelCase__: Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
UpperCAmelCase__: Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
UpperCAmelCase__: Optional[float] = field(
default=0.1 , metadata={
'''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
UpperCAmelCase__: Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in the feature extractor.'''} , )
UpperCAmelCase__: Optional[float] = field(
default=0.05 , metadata={
'''help''': (
'''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
UpperCAmelCase__: Optional[float] = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: Optional[str] = field(
default=__magic_name__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCAmelCase__: Optional[str] = field(
default='''train+validation''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''''
} , )
UpperCAmelCase__: bool = field(
default=__magic_name__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCAmelCase__: Optional[int] = field(
default=__magic_name__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCAmelCase__: Optional[int] = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__: Optional[int] = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase__: List[str] = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class _a :
'''simple docstring'''
UpperCAmelCase__: WavaVecaProcessor
UpperCAmelCase__: Union[bool, str] = True
UpperCAmelCase__: Optional[int] = None
UpperCAmelCase__: Optional[int] = None
UpperCAmelCase__: Optional[int] = None
UpperCAmelCase__: Optional[int] = None
def __call__( self , A__ ):
# split inputs and labels since they have to be of different lengths and need
# different padding methods
A__ : Tuple = [{"""input_values""": feature["""input_values"""]} for feature in features]
A__ : List[str] = [{"""input_ids""": feature["""labels"""]} for feature in features]
A__ : List[Any] = self.processor.pad(
A__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
A__ : Optional[Any] = self.processor.pad(
labels=A__ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )
# replace padding with -100 to ignore loss correctly
A__ : Optional[Any] = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
A__ : str = labels
return batch
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self , A__ , A__ ):
model.train()
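        # Run the forward pass under autocast when native AMP is enabled; the
        # resulting loss is scaled and backpropagated further down via the
        # matching engine (GradScaler, apex, or deepspeed).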
A__ : Optional[Any] = self._prepare_inputs(A__ )
if self.use_amp:
with autocast():
A__ : Any = self.compute_loss(A__ , A__ )
else:
A__ : Union[str, Any] = self.compute_loss(A__ , A__ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
A__ : int = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
A__ : List[Any] = loss.sum() / (inputs["""labels"""] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
A__ : Optional[Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(A__ ).backward()
elif self.use_apex:
with amp.scale_loss(A__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(A__ )
else:
loss.backward()
return loss.detach()
def UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ : Dict = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A__ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
A__ : Optional[int] = datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
A__ : Optional[Any] = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
A__ : Dict = f"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowercase_: List[str] ):
A__ : int = re.sub(lowercase_ , """""" , batch["""sentence"""] ).lower() + """ """
return batch
A__ : Any = train_dataset.map(lowercase_ , remove_columns=["""sentence"""] )
A__ : Tuple = eval_dataset.map(lowercase_ , remove_columns=["""sentence"""] )
def extract_all_chars(lowercase_: Optional[Any] ):
A__ : Optional[int] = """ """.join(batch["""text"""] )
A__ : Union[str, Any] = list(set(lowercase_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
A__ : List[str] = train_dataset.map(
lowercase_ , batched=lowercase_ , batch_size=-1 , keep_in_memory=lowercase_ , remove_columns=train_dataset.column_names , )
A__ : Optional[Any] = train_dataset.map(
lowercase_ , batched=lowercase_ , batch_size=-1 , keep_in_memory=lowercase_ , remove_columns=eval_dataset.column_names , )
A__ : List[Any] = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
A__ : Optional[int] = {v: k for k, v in enumerate(lowercase_ )}
A__ : List[Any] = vocab_dict[""" """]
del vocab_dict[" "]
A__ : List[str] = len(lowercase_ )
A__ : Optional[int] = len(lowercase_ )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(lowercase_ , lowercase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ : Optional[int] = WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
A__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ )
A__ : Optional[int] = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
A__ : Optional[int] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
A__ : Tuple = min(len(lowercase_ ) , data_args.max_train_samples )
A__ : Dict = train_dataset.select(range(lowercase_ ) )
if data_args.max_val_samples is not None:
A__ : str = eval_dataset.select(range(data_args.max_val_samples ) )
A__ : Union[str, Any] = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowercase_: Any ):
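        # Common Voice audio ships at 48 kHz; load each clip and resample it to
        # the 16 kHz rate the feature extractor expects.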
A__ , A__ : Any = torchaudio.load(batch["""path"""] )
A__ : List[Any] = resampler(lowercase_ ).squeeze().numpy()
A__ : Tuple = 16000
A__ : Any = batch["""text"""]
return batch
A__ : Optional[Any] = train_dataset.map(
lowercase_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
A__ : Optional[Any] = eval_dataset.map(
lowercase_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(lowercase_: str ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
A__ : int = processor(
audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] )
batch.update(lowercase_ )
return batch
A__ : Any = train_dataset.map(
lowercase_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , )
A__ : Union[str, Any] = eval_dataset.map(
lowercase_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
A__ : int = datasets.load_metric("""wer""" )
def compute_metrics(lowercase_: str ):
A__ : Any = pred.predictions
A__ : Optional[Any] = np.argmax(lowercase_ , axis=-1 )
A__ : List[str] = processor.tokenizer.pad_token_id
A__ : List[Any] = processor.batch_decode(lowercase_ )
# we do not want to group tokens when computing the metrics
A__ : List[str] = processor.batch_decode(pred.label_ids , group_tokens=lowercase_ )
A__ : int = wer_metric.compute(predictions=lowercase_ , references=lowercase_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
A__ : int = DataCollatorCTCWithPadding(processor=lowercase_ , padding=lowercase_ )
# Initialize our Trainer
A__ : int = CTCTrainer(
model=lowercase_ , data_collator=lowercase_ , args=lowercase_ , compute_metrics=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A__ : int = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
A__ : Tuple = model_args.model_name_or_path
else:
A__ : str = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
A__ : List[Any] = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
A__ : Tuple = train_result.metrics
A__ : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
)
A__ : List[Any] = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("""train""" , lowercase_ )
trainer.save_metrics("""train""" , lowercase_ )
trainer.save_state()
# Evaluation
A__ : str = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A__ : Optional[int] = trainer.evaluate()
A__ : str = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowercase_ )
A__ : Tuple = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
return results
if __name__ == "__main__":
main()
| 64 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (lowercase_: Matrix , lowercase_: Matrix ) -> Matrix:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase_ )]
A__ : int
A__ : int
A__ : int
A__ : int
A__ : int
A__ : float
for row in range(lowercase_ ):
for col in range(lowercase_ ):
A__ : List[str] = matrix[row][col]
A__ : int = vector[row][0]
A__ : Optional[int] = 0
A__ : str = 0
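    # Forward elimination with partial pivoting: for each column, pick the row
    # with the largest absolute entry as pivot, swap it into place, and zero
    # out every entry below it.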
while row < size and col < size:
# pivoting
A__ : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase_ , lowercase_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A__ , A__ : Union[str, Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase_ ):
A__ : List[Any] = augmented[rowa][col] / augmented[row][col]
A__ : Dict = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase_ ):
for row in range(lowercase_ ):
A__ : List[str] = augmented[row][col] / augmented[col][col]
for cola in range(lowercase_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase_ )
]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(lowercase_ )] for _ in range(lowercase_ )]
A__ : Matrix = [[0] for _ in range(lowercase_ )]
A__ : Matrix
A__ : int
A__ : int
A__ : int
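    # Vandermonde-style system: row x_val stores the powers
    # (x_val + 1) ** (size - 1) down to (x_val + 1) ** 0, so solving it
    # recovers the coefficients of the unique degree-(size - 1) polynomial
    # through the given data points.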
for x_val, y_val in enumerate(lowercase_ ):
for col in range(lowercase_ ):
A__ : Dict = (x_val + 1) ** (size - col - 1)
A__ : Any = y_val
A__ : Union[str, Any] = solve(lowercase_ , lowercase_ )
def interpolated_func(lowercase_: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase_ ) )
return interpolated_func
def UpperCamelCase (lowercase_: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCamelCase (lowercase_: Callable[[int], int] = question_function , lowercase_: int = 10 ) -> int:
A__ : list[int] = [func(lowercase_ ) for x_val in range(1 , order + 1 )]
A__ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
A__ : int = 0
A__ : Callable[[int], int]
A__ : int
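    # For each fitted polynomial, advance x until the polynomial first
    # disagrees with the generating function; that first incorrect term (FIT)
    # is added to the running total.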
for poly in polynomials:
A__ : List[str] = 1
while func(lowercase_ ) == poly(lowercase_ ):
x_val += 1
ret += poly(lowercase_ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 64 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
A_ : Tuple = HfApi()
A_ : Any = {}
# fmt: off
A_ : Any = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
A_ : Optional[int] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
A_ : Optional[Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
A_ : Tuple = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
A_ : Optional[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
A_ : Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
A_ : Any = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
A_ : Dict = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
A_ : str = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
A_ : Optional[Any] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
A_ : List[Any] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
A_ : Tuple = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
A_ : Any = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
A_ : Tuple = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
A_ : str = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
A_ : Optional[Any] = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
A_ : int = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('CompVis'):
A_ : Any = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
A_ : Any = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
A_ : Tuple = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
A_ : List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
A_ : Dict = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(f'''{mod.modelId} has passed successfully!!!''')
| 64 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (lowercase_: int ) -> int:
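    """Recursive factorial, memoised with functools.lru_cache.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """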
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
A_ : Any = get_logger(__name__)
class _a (enum.Enum ):
'''simple docstring'''
UpperCAmelCase__: Dict = '''all_checks'''
UpperCAmelCase__: Optional[int] = '''basic_checks'''
UpperCAmelCase__: Optional[Any] = '''no_checks'''
class _a (__magic_name__ ):
'''simple docstring'''
class _a (__magic_name__ ):
'''simple docstring'''
class _a (__magic_name__ ):
'''simple docstring'''
class _a (__magic_name__ ):
'''simple docstring'''
def UpperCamelCase (lowercase_: Optional[dict] , lowercase_: dict , lowercase_: List[Any]=None ) -> Dict:
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowercase_ ) - set(lowercase_ ) ) )
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowercase_ ) - set(lowercase_ ) ) )
A__ : List[str] = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
A__ : Union[str, Any] = """ for """ + verification_name if verification_name is not None else """"""
if len(lowercase_ ) > 0:
raise NonMatchingChecksumError(
f"""Checksums didn't match{for_verification_name}:\n"""
f"""{bad_urls}\n"""
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class _a (__magic_name__ ):
'''simple docstring'''
class _a (__magic_name__ ):
'''simple docstring'''
class _a (__magic_name__ ):
'''simple docstring'''
class _a (__magic_name__ ):
'''simple docstring'''
def UpperCamelCase (lowercase_: Optional[dict] , lowercase_: dict ) -> int:
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise ExpectedMoreSplits(str(set(lowercase_ ) - set(lowercase_ ) ) )
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise UnexpectedSplits(str(set(lowercase_ ) - set(lowercase_ ) ) )
A__ : int = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowercase_ ) > 0:
raise NonMatchingSplitsSizesError(str(lowercase_ ) )
logger.info("""All the splits matched successfully.""" )
def UpperCamelCase (lowercase_: str , lowercase_: bool = True ) -> dict:
if record_checksum:
A__ : List[Any] = shaaaa()
with open(lowercase_ , """rb""" ) as f:
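            # Stream the file in 1 MiB chunks (1 << 20 bytes) so even large
            # files are hashed without loading them fully into memory.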
for chunk in iter(lambda: f.read(1 << 20 ) , b"""""" ):
m.update(lowercase_ )
A__ : Any = m.hexdigest()
else:
A__ : Union[str, Any] = None
return {"num_bytes": os.path.getsize(lowercase_ ), "checksum": checksum}
def UpperCamelCase (lowercase_: Optional[Any] ) -> List[str]:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def UpperCamelCase () -> Tuple:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class _a (__magic_name__ ):
'''simple docstring'''
@require_beam
def __A ( self ):
A__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : int = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
import apache_beam as beam
A__ : int = beam.io.parquetio.WriteToParquet
A__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
A__ : Optional[Any] = partial(A__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __A ( self ):
A__ : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 64 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , A__ , )
super().__init__(*A__ , **A__ )
| 64 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
A_ : Optional[Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
A_ : Optional[Any] = typing.Union[np.floataa, int, float] # noqa: UP007
def UpperCamelCase (lowercase_: Vector , lowercase_: Vector ) -> VectorOut:
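    # Vectorised Euclidean distance: square the elementwise differences, sum,
    # then take the square root.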
return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) )
def UpperCamelCase (lowercase_: Vector , lowercase_: Vector ) -> VectorOut:
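    # Pure-Python equivalent of the NumPy version above, used by the benchmark
    # in the __main__ block below.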
return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2)
if __name__ == "__main__":
def UpperCamelCase () -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int:
def get_dataset(lowercase_: Optional[int] ):
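        # Synthetic linear-regression data: targets are a * x + b plus small
        # Gaussian noise, which the two-parameter DummyModel can fit.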
A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
A__ : Dict = get_dataset(lowercase_ )
A__ : Any = get_dataset(lowercase_ )
A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]:
A__ : List[Any] = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
A__ , A__ : Any = batch
A__ : Any = model(lowercase_ )
A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
# Save 11 checkpoints; with total_limit=2 only the two most recent are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 64 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
'''simple docstring'''
@property
def __A ( self ):
return self.get_dummy_input()
@property
def __A ( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def __A ( self , A__=True , A__=False , A__=False , A__=False , ):
A__ : str = 4
A__ : List[str] = 32
A__ : str = (32, 32)
A__ : List[str] = torch.manual_seed(0 )
A__ : Tuple = torch.device(A__ )
A__ : str = (batch_size, num_channels) + sizes
A__ : int = randn_tensor(A__ , generator=A__ , device=A__ )
A__ : Union[str, Any] = {"""hidden_states""": hidden_states}
if include_temb:
A__ : List[Any] = 128
A__ : Dict = randn_tensor((batch_size, temb_channels) , generator=A__ , device=A__ )
if include_res_hidden_states_tuple:
A__ : Optional[int] = torch.manual_seed(1 )
A__ : List[Any] = (randn_tensor(A__ , generator=A__ , device=A__ ),)
if include_encoder_hidden_states:
A__ : List[Any] = floats_tensor((batch_size, 32, 32) ).to(A__ )
if include_skip_sample:
A__ : Dict = randn_tensor(((batch_size, 3) + sizes) , generator=A__ , device=A__ )
return dummy_input
def __A ( self ):
A__ : Dict = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A__ : Union[str, Any] = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A__ : Any = self.dummy_input
return init_dict, inputs_dict
def __A ( self , A__ ):
A__ , A__ : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
A__ : Optional[Any] = self.block_class(**A__ )
unet_block.to(A__ )
unet_block.eval()
with torch.no_grad():
A__ : Union[str, Any] = unet_block(**A__ )
if isinstance(A__ , A__ ):
A__ : Any = output[0]
self.assertEqual(output.shape , self.output_shape )
A__ : str = output[0, -1, -3:, -3:]
A__ : List[Any] = torch.tensor(A__ ).to(A__ )
assert torch_all_close(output_slice.flatten() , A__ , atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def __A ( self ):
A__ , A__ : Optional[int] = self.prepare_init_args_and_inputs_for_common()
A__ : str = self.block_class(**A__ )
model.to(A__ )
model.train()
A__ : Optional[int] = model(**A__ )
if isinstance(A__ , A__ ):
A__ : Tuple = output[0]
A__ : Tuple = torch.device(A__ )
A__ : str = randn_tensor(output.shape , device=A__ )
A__ : Union[str, Any] = torch.nn.functional.mse_loss(A__ , A__ )
loss.backward()
| 64 |
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bool:
A__ : Union[str, Any] = len(lowercase_ )
A__ : List[Any] = len(lowercase_ )
A__ : List[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
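    # dp[i][j] is True when the first i characters of the first string can be
    # transformed into the first j characters of the second by uppercasing
    # some of its lowercase letters and deleting the remaining lowercase ones.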
A__ : str = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A__ : int = True
if a[i].islower():
A__ : Dict = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: int = '''naver-clova-ix/donut-base-finetuned-docvqa'''
UpperCAmelCase__: Optional[int] = (
'''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
UpperCAmelCase__: int = '''document_qa'''
UpperCAmelCase__: Union[str, Any] = AutoProcessor
UpperCAmelCase__: Optional[Any] = VisionEncoderDecoderModel
UpperCAmelCase__: Union[str, Any] = ['''image''', '''text''']
UpperCAmelCase__: Union[str, Any] = ['''text''']
def __init__( self , *A__ , **A__ ):
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*A__ , **A__ )
def __A ( self , A__ , A__ ):
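        # Donut-style DocVQA prompting: splice the user question into the task
        # prompt, tokenize it into decoder input ids, and convert the page
        # image into pixel values for the encoder.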
A__ : Optional[int] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
A__ : List[str] = task_prompt.replace("""{user_input}""" , A__ )
A__ : Optional[Any] = self.pre_processor.tokenizer(
A__ , add_special_tokens=A__ , return_tensors="""pt""" ).input_ids
A__ : int = self.pre_processor(A__ , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , A__ ):
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=A__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=A__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=A__ , ).sequences
def __A ( self , A__ ):
A__ : Optional[int] = self.pre_processor.batch_decode(A__ )[0]
A__ : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
A__ : Optional[int] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
A__ : List[Any] = re.sub(r"""<.*?>""" , """""" , A__ , count=1 ).strip() # remove first task start token
A__ : Tuple = self.pre_processor.tokenajson(A__ )
return sequence["answer"]
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple=1.0 , lowercase_: Dict=None , lowercase_: int=None ) -> str:
if rng is None:
A__ : Optional[Any] = global_rng
A__ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # sequence lengths step evenly from min_seq_length to max_seq_length across the batch
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 64 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    # gpt2 uses a Conv1D-style `c_fc` in its MLP; bloom-style models use `dense_4h_to_h`
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like low-rank adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
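# A minimal usage sketch (the layer sizes below are illustrative, not from this file):
#
#   base = nn.Linear(768, 768)
#   base.weight.requires_grad_(False)   # freeze the wrapped layer
#   lora = LoRALayer(base, rank=16)     # only the two small adapter matrices train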
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fpaa = self.model_fpaa.to(torch.float32)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
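        # `wo` is listed in T5's `_keep_in_fp32_modules`, so it must stay in fp32
        # even when the rest of the model is loaded in 4-bit.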
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder that generates text from a projected prefix embedding using a GPT-2 language model head.
    """

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
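# Rough call pattern (illustrative): given CLIP-style text features of shape
# (batch, prefix_inner_dim), `generate_captions(features, eos_token_id, device)`
# decodes each feature with beam search and returns token ids plus lengths.
# The beam scores above are length-normalized log-probabilities, and finished
# beams are frozen via the `is_stopped` mask.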
| 64 | 1 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
                src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
                tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , A__ )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name,
                        additional_special_tokens=added_tokens,
                        **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")

                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on

        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    # (e.g. with the default 10 MB chunksize: 10 * 2**20 // 32 = 327,680-byte blocks)
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
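    # Reading flow in short: whole-file JSON (when `field` is set) goes through the
    # stdlib json module, while newline-delimited JSON is read in `chunksize`-byte
    # chunks handed to pyarrow, doubling `block_size` whenever a JSON object
    # straddles a chunk boundary.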
| 64 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 64 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'{len(upper_files)} files contain uppercase characters:')
    print('\n'.join(upper_files) + '\n')

space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f'{len(space_files)} files contain space characters:')
    print('\n'.join(space_files) + '\n')

hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f'{len(hyphen_files)} files contain hyphen characters:')
    print('\n'.join(hyphen_files) + '\n')

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'{len(nodir_files)} files are not in a directory:')
    print('\n'.join(nodir_files) + '\n')

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 64 | 1 |
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # store the default key (0 means "no key set")
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
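# Note: XOR with the same key is its own inverse, so for any string s and key k,
# crypt.decrypt_string(crypt.encrypt_string(s, k), k) == s.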
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 64 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
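# Usage sketch (illustrative names, not from this file): deprecating a keyword
# argument by popping it from **kwargs and warning once.
#
#   def set_timesteps(self, num_steps, **kwargs):
#       deprecated = deprecate("steps", "1.0.0", "Use `num_steps` instead.", take_from=kwargs)
#       num_steps = deprecated if deprecated is not None else num_steps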
| 64 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ : str = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A_ : Tuple = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def UpperCamelCase (lowercase_: Optional[int] , lowercase_: Dict=False ) -> Optional[int]:
A__ : List[str] = """"""
if is_panoptic:
A__ : List[Any] = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A__ : int = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
A__ : List[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
A__ : Dict = in_proj_weight[:256, :]
A__ : List[Any] = in_proj_bias[:256]
A__ : Optional[Any] = in_proj_weight[256:512, :]
A__ : Tuple = in_proj_bias[256:512]
A__ : Dict = in_proj_weight[-256:, :]
A__ : Dict = in_proj_bias[-256:]
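# A minimal sketch of the packed-projection split above, with hypothetical tensors:
# PyTorch's MultiheadAttention stores the query, key and value projections as one
# (3 * hidden, hidden) matrix, which is sliced into equal thirds (hidden size 256 here).
#
#     >>> import torch
#     >>> in_proj_weight = torch.randn(3 * 256, 256)  # rows: q, then k, then v
#     >>> q_w, k_w, v_w = in_proj_weight[:256], in_proj_weight[256:512], in_proj_weight[-256:]
#     >>> q_w.shape
#     torch.Size([256, 256])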
def UpperCamelCase () -> List[Any]:
A__ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Optional[int] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase (lowercase_: str , lowercase_: int ) -> int:
A__ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A__ : Optional[int] = """resnet101"""
if "dc5" in model_name:
A__ : str = True
A__ : Union[str, Any] = """panoptic""" in model_name
if is_panoptic:
A__ : Tuple = 250
else:
A__ : Tuple = 91
A__ : Union[str, Any] = """huggingface/label-files"""
A__ : str = """coco-detection-id2label.json"""
A__ : Tuple = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
    A__ : List[Any] = {int(k): v for k, v in idalabel.items()}
A__ : Optional[Any] = idalabel
A__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load image processor
A__ : str = """coco_panoptic""" if is_panoptic else """coco_detection"""
A__ : List[Any] = ConditionalDetrImageProcessor(format=lowercase_ )
# prepare image
A__ : Union[str, Any] = prepare_img()
A__ : Any = image_processor(images=lowercase_ , return_tensors="""pt""" )
A__ : int = encoding["""pixel_values"""]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
A__ : Any = torch.hub.load("""DeppMeng/ConditionalDETR""" , lowercase_ , pretrained=lowercase_ ).eval()
A__ : int = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A__ : List[str] = """conditional_detr.""" + src
rename_key(lowercase_ , lowercase_ , lowercase_ )
A__ : int = rename_backbone_keys(lowercase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase_ , is_panoptic=lowercase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A__ : Union[str, Any] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A__ : List[Any] = state_dict.pop(lowercase_ )
A__ : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A__ : Dict = state_dict.pop(lowercase_ )
A__ : int = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A__ : Optional[int] = state_dict.pop(lowercase_ )
A__ : List[str] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A__ : Dict = state_dict.pop(lowercase_ )
A__ : str = val
# finally, create HuggingFace model and load state dict
A__ : Optional[int] = ConditionalDetrForSegmentation(lowercase_ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
model.push_to_hub(repo_id=lowercase_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A__ : Tuple = conditional_detr(lowercase_ )
A__ : Tuple = model(lowercase_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output the PyTorch model.'
)
A_ : Union[str, Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
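# Example invocation (script name and output path are illustrative):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50_converted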
| 64 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (lowercase_: List[str] , lowercase_: str ) -> Optional[Any]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A__ : Union[str, Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : Optional[int] = torch.permute(lowercase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase_ ):
# linear layer
A__ : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
A__ : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A__ : Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
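# Example of the rewriting above, for a hypothetical Flax key: a 2D linear kernel such as
# ("encoder", "dense", "kernel") becomes ("encoder", "dense", "weight") with the tensor
# transposed, since Flax stores linear kernels as (in, out) while torch.nn.Linear expects
# (out, in); 3D expert kernels are instead permuted with (0, 2, 1) to keep the expert axis first.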
def UpperCamelCase (lowercase_: Tuple , lowercase_: Optional[int] , lowercase_: str ) -> Union[str, Any]:
if "metadata" in layer:
A__ : Tuple = layer.split("""metadata""" )
A__ : Optional[Any] = """""".join(split_layer[0] )[:-1]
A__ : Optional[Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
A__ : str = layer.split("""kvstore""" )
A__ : int = """""".join(split_layer[0] )[:-1]
A__ : Optional[int] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
A__ : Any = layer.split("""/""" )
A__ : int = """/""".join(split_layer[:-1] )
A__ : str = (split_layer[-1],)
if "kvstore/path" in layer:
A__ : Dict = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
A__ : Optional[int] = """file"""
else:
A__ : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
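# Example with a hypothetical flattened checkpoint key:
#   "target/encoder/block/0/mlp/wi/kernel/kvstore/path"
# yields curr_real_layer_name "target/encoder/block/0/mlp/wi/kernel",
# split_layer [("kvstore", "path")], and content pointing at
# f"{switch_checkpoint_path}/<relative path stored under that key>".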
def UpperCamelCase (lowercase_: str , lowercase_: List[Any] ) -> int:
A__ : int = rename_keys(lowercase_ )
A__ : Any = {}
for k, v in current_block.items():
A__ : Dict = v
A__ : str = new_current_block
torch.save(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
A__ : List[Any] = []
A__ : int = {}
A__ : List[str] = 0
A__ : Any = 0
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
A__ : Any = {}
for layer in checkpoint_info.keys():
A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
lowercase_ , lowercase_ , lowercase_ )
if curr_real_layer_name in all_layers:
A__ : Optional[int] = content
else:
A__ : List[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A__ : List[Any] = torch.tensor(lowercase_ )
A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
A__ : Any = """/""".join(lowercase_ )
        # If this weight is going to tip the current shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A__ : List[Any] = os.path.join(
lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A__ : Any = {}
A__ : str = 0
A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A__ : str = {}
A__ : Any = {}
for idx, shard in enumerate(lowercase_ ):
        A__ : Any = weights_name.replace(
            """.bin""" , f"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" )
A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
A__ : str = shard
for key in shard:
A__ : Any = shard_file
# Add the metadata
A__ : Tuple = {"""total_size""": total_size}
A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
f.write(lowercase_ )
return metadata, index
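# The returned index follows the standard sharded-checkpoint layout, e.g. (illustrative
# sizes and key names):
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {"encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin"}
#   }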
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCamelCase () -> int:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A__ : str = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
A__ : str = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
A__ : Tuple = TaTokenizer.from_pretrained("""t5-small""" )
A__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
A__ : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
A__ : Tuple = model.generate(lowercase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 64 | 1 |
import math
def UpperCamelCase (lowercase_: Dict , lowercase_: List[Any] ) -> Optional[int]:
if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), i.e. we compare the powers via their base-10 logarithms.
return y * math.logaa(lowercase_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
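# Worked example: for 2^10 versus 4^5 the helper above returns 10 * log10(2) ≈ 3.0103
# and 5 * log10(4) ≈ 3.0103, so the comparison below reports both powers as equal
# (both evaluate to 1024).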
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
A_ : Optional[int] = 'Enter the base and the power separated by a comma: '
A_ , A_ : Dict = map(int, input(prompt).split(','))
A_ , A_ : Optional[Any] = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
A_ : Any = res(xa, ya)
A_ : str = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal')
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def UpperCamelCase (lowercase_: Any , lowercase_: Tuple=False ) -> Dict:
A__ : List[str] = OmegaConf.load(lowercase_ )
if display:
print(yaml.dump(OmegaConf.to_container(lowercase_ ) ) )
return config
def UpperCamelCase (lowercase_: Tuple , lowercase_: Dict=None , lowercase_: Optional[int]=None ) -> List[Any]:
if conf_path is None:
A__ : Any = """./model_checkpoints/vqgan_only.yaml"""
A__ : List[str] = load_config(lowercase_ , display=lowercase_ )
A__ : List[str] = VQModel(**config.model.params )
if ckpt_path is None:
A__ : Optional[int] = """./model_checkpoints/vqgan_only.pt"""
A__ : Union[str, Any] = torch.load(lowercase_ , map_location=lowercase_ )
if ".ckpt" in ckpt_path:
A__ : Dict = sd["""state_dict"""]
model.load_state_dict(lowercase_ , strict=lowercase_ )
model.to(lowercase_ )
del sd
return model
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple ) -> Optional[int]:
A__ , A__ , A__ : str = model.encode(lowercase_ )
print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
A__ : Tuple = model.decode(lowercase_ )
return xrec
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Tuple=False ) -> str:
A__ , A__ : int = string.rsplit(""".""" , 1 )
if reload:
A__ : List[Any] = importlib.import_module(lowercase_ )
importlib.reload(lowercase_ )
return getattr(importlib.import_module(lowercase_ , package=lowercase_ ) , cls )
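# Example: passing "taming.models.vqgan.VQModel" imports taming.models.vqgan and returns
# the VQModel class, ready to be instantiated with keyword parameters.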
def UpperCamelCase (lowercase_: List[str] ) -> List[Any]:
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def UpperCamelCase (lowercase_: int , lowercase_: Optional[Any] , lowercase_: Optional[Any]=True , lowercase_: Tuple=True ) -> str:
A__ : Union[str, Any] = instantiate_from_config(lowercase_ )
if sd is not None:
model.load_state_dict(lowercase_ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def UpperCamelCase (lowercase_: Any , lowercase_: Tuple , lowercase_: Union[str, Any] , lowercase_: Union[str, Any] ) -> int:
# load the specified checkpoint
if ckpt:
A__ : Any = torch.load(lowercase_ , map_location="""cpu""" )
A__ : str = pl_sd["""global_step"""]
print(f"""loaded model from global step {global_step}.""" )
else:
A__ : Any = {"""state_dict""": None}
A__ : Dict = None
A__ : Optional[int] = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=lowercase_ , eval_mode=lowercase_ )["""model"""]
return model, global_step
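# A config consumed by the helpers above typically looks like this (illustrative YAML,
# the parameter values are assumptions):
#
#   model:
#     target: taming.models.vqgan.VQModel
#     params:
#       embed_dim: 256
#       n_embed: 1024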
| 64 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A_ : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCamelCase (lowercase_: Optional[Any] ) -> Optional[int]:
A__ : List[Any] = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
A_ : Any = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def UpperCamelCase (lowercase_: str ) -> Any:
A__ : Dict = list(s_dict.keys() )
for key in keys:
A__ : List[str] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
A__ : List[Any] = new_key.replace(lowercase_ , lowercase_ )
print(f"""{key} -> {new_key}""" )
A__ : Tuple = s_dict.pop(lowercase_ )
return s_dict
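# Example of the mapping applied above, for a hypothetical key: "blocks.0.mlp.0.weight"
# first matches "blocks" -> "layers" and then "mlp.0" -> "fc1", ending up as
# "layers.0.fc1.weight".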
def UpperCamelCase (lowercase_: Tuple ) -> Optional[int]:
A__ , A__ : Any = emb.weight.shape
A__ : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
A__ : Union[str, Any] = emb.weight.data
return lin_layer
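# The returned layer shares the embedding matrix, so its forward pass computes
# hidden_states @ emb.weight.T, i.e. the standard input/output embedding tying
# used for the LM head.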
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bytes:
os.makedirs(lowercase_ , exist_ok=lowercase_ )
A__ : Tuple = os.path.basename(lowercase_ )
A__ : int = url.split("""/""" )[-2]
A__ : Dict = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ) and not os.path.isfile(lowercase_ ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowercase_ ):
A__ : Optional[Any] = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowercase_ ) as source, open(lowercase_ , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowercase_ , unit_divisor=1024 ) as loop:
while True:
A__ : Any = source.read(8192 )
if not buffer:
break
output.write(lowercase_ )
loop.update(len(lowercase_ ) )
A__ : Dict = open(lowercase_ , """rb""" ).read()
if hashlib.shaaaa(lowercase_ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Tuple ) -> Optional[Any]:
if ".pt" not in checkpoint_path:
A__ : Tuple = _download(_MODELS[checkpoint_path] )
else:
A__ : Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
A__ : str = original_checkpoint["""dims"""]
A__ : List[Any] = original_checkpoint["""model_state_dict"""]
A__ : Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(lowercase_ )
rename_keys(lowercase_ )
A__ : List[str] = True
A__ : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
A__ : List[Any] = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
A__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
A__ , A__ : List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
A__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A__ : str = proj_out_weights
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A_ : Tuple = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
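# Example invocation (script name and output path are illustrative); the checkpoint can be
# one of the named checkpoints in the URL table above, in which case it is downloaded first:
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en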
| 64 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A_ : Dict = logging.get_logger(__name__)
logging.set_verbosity_info()
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> Any:
if "xprophetnet" in prophetnet_checkpoint_path:
A__ : Union[str, Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase_ )
A__ , A__ : int = XLMProphetNetForConditionalGeneration.from_pretrained(
lowercase_ , output_loading_info=lowercase_ )
else:
A__ : int = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase_ )
A__ , A__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowercase_ , output_loading_info=lowercase_ )
A__ : Any = ["""key_proj""", """value_proj""", """query_proj"""]
A__ : Optional[int] = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
A__ : Optional[int] = key.split(""".""" )
if attributes[0] == "lm_head":
A__ : int = prophet
A__ : Tuple = prophet_old
else:
A__ : Optional[int] = prophet.prophetnet
A__ : Optional[int] = prophet_old.model
A__ : List[str] = False
for attribute in attributes:
if attribute in mapping:
A__ : str = mapping[attribute]
if not hasattr(lowercase_ , lowercase_ ) and len(lowercase_ ) > 0:
A__ : Union[str, Any] = attribute
elif hasattr(lowercase_ , lowercase_ ):
A__ : List[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
A__ : Union[str, Any] = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
A__ : List[str] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
A__ : Tuple = old_model.bias
logger.info(f"""{attribute} is initialized""" )
A__ : Optional[Any] = True
break
elif attribute in special_keys and hasattr(lowercase_ , """in_proj_weight""" ):
A__ : Union[str, Any] = old_model.in_proj_weight.shape[0] // 3
A__ : Dict = getattr(lowercase_ , lowercase_ )
                    assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                    assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
A__ : Tuple = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
A__ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
A__ : int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
A__ : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
A__ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
A__ : Dict = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
A__ : Union[str, Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
A__ : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
A__ : str = True
break
if attribute.isdigit():
A__ : Optional[Any] = model[int(lowercase_ )]
A__ : Optional[int] = old_model[int(lowercase_ )]
else:
A__ : List[Any] = getattr(lowercase_ , lowercase_ )
if old_attribute == "":
A__ : Any = old_model
else:
if not hasattr(lowercase_ , lowercase_ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
A__ : str = getattr(lowercase_ , lowercase_ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowercase_ )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A_ : Union[str, Any] = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
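# Example invocation (script name and paths are illustrative):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path ./prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-large-uncased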
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = ['''pixel_values''']
def __init__( self , A__ = True , A__ = 32 , A__=PILImageResampling.BILINEAR , A__ = True , **A__ , ):
A__ : int = do_resize
A__ : Tuple = do_rescale
A__ : Tuple = size_divisor
A__ : List[Any] = resample
super().__init__(**A__ )
def __A ( self , A__ , A__ , A__ , A__ = None , **A__ ):
A__ , A__ : str = get_image_size(A__ )
# Rounds the height and width down to the closest multiple of size_divisor
A__ : List[str] = height // size_divisor * size_divisor
A__ : List[Any] = width // size_divisor * size_divisor
A__ : List[Any] = resize(A__ , (new_h, new_w) , resample=A__ , data_format=A__ , **A__ )
return image
def __A ( self , A__ , A__ , A__ = None , **A__ ):
return rescale(image=A__ , scale=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ = None , A__ = None , A__=None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ):
A__ : Any = do_resize if do_resize is not None else self.do_resize
A__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
A__ : int = size_divisor if size_divisor is not None else self.size_divisor
A__ : Optional[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
A__ : str = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
A__ : int = [to_numpy_array(A__ ) for img in images]
if do_resize:
A__ : Optional[Any] = [self.resize(A__ , size_divisor=A__ , resample=A__ ) for image in images]
if do_rescale:
A__ : List[Any] = [self.rescale(A__ , scale=1 / 255 ) for image in images]
A__ : Union[str, Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
A__ : int = {"""pixel_values""": images}
return BatchFeature(data=A__ , tensor_type=A__ )
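# Hypothetical usage sketch of the processor above: with size_divisor=32, a 1080x1920
# PIL image is resized to 1056x1920 (both dimensions rounded down to multiples of 32),
# rescaled by 1/255 and returned channels-first:
#   out = processor(images=[pil_image], return_tensors="pt")
#   out["pixel_values"].shape  # torch.Size([1, 3, 1056, 1920])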
| 64 |
def UpperCamelCase (lowercase_: int ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("""Input value must be an 'int' type""" )
A__ : int = 0
while number:
position += 1
number >>= 1
return position
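# Worked example: 17 is 0b10001, so the loop shifts 17 -> 8 -> 4 -> 2 -> 1 -> 0 and
# returns 5, matching Python's built-in (17).bit_length().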
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def UpperCamelCase (lowercase_: Dict ) -> List[str]:
def wrapper(*lowercase_: List[Any] , **lowercase_: str ):
A__ : List[Any] = timeit.default_timer()
A__ : List[str] = func(*lowercase_ , **lowercase_ )
A__ : Any = timeit.default_timer() - starttime
return delta
A__ : List[str] = func.__name__
return wrapper
def UpperCamelCase (lowercase_: dict , lowercase_: Any=100 , lowercase_: List[Any]=None ) -> str:
A__ : Dict = []
A__ : List[Any] = seq_shapes or {}
for i in range(lowercase_ ):
A__ : Any = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowercase_ , _ArrayXD ):
A__ : Union[str, Any] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowercase_ , datasets.Value ):
if v.dtype == "string":
A__ : Optional[Any] = """The small grey turtle was surprisingly fast when challenged."""
else:
A__ : List[str] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(lowercase_ , datasets.Sequence ):
while isinstance(lowercase_ , datasets.Sequence ):
A__ : Optional[Any] = v.feature
A__ : str = seq_shapes[k]
A__ : Dict = np.random.rand(*lowercase_ ).astype(v.dtype )
A__ : List[Any] = data
dummy_data.append((i, example) )
return dummy_data
def UpperCamelCase (lowercase_: Any , lowercase_: Tuple , lowercase_: Union[str, Any]=100 , lowercase_: Optional[int]=None ) -> str:
A__ : Union[str, Any] = generate_examples(lowercase_ , num_examples=lowercase_ , seq_shapes=lowercase_ )
with ArrowWriter(features=lowercase_ , path=lowercase_ ) as writer:
for key, record in dummy_data:
A__ : List[Any] = features.encode_example(lowercase_ )
writer.write(lowercase_ )
A__ , A__ : List[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
A__ : Optional[Any] = datasets.Dataset.from_file(filename=lowercase_ , info=datasets.DatasetInfo(features=lowercase_ ) )
return dataset
| 64 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: int , lowercase_: int ) -> np.ndarray:
A__ : Any = cva.getAffineTransform(lowercase_ , lowercase_ )
return cva.warpAffine(lowercase_ , lowercase_ , (rows, cols) )
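# Note: three point correspondences uniquely determine a 2x3 affine matrix (six unknowns,
# six equations), which is why exactly three source and three destination points are
# passed to cva.getAffineTransform above.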
if __name__ == "__main__":
# read original image
A_ : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A_ , A_ : Optional[Any] = gray_img.shape
# set different points to rotate image
A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
A_ : Dict = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A_ : Union[str, Any] = plt.figure(1)
A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 64 | 1 |
def UpperCamelCase (lowercase_: str ) -> int:
A__ : Optional[int] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
A__ : Optional[Any] = hex_num[0] == """-"""
if is_negative:
A__ : int = hex_num[1:]
try:
A__ : Optional[Any] = int(lowercase_ , 16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
A__ : Tuple = """"""
while int_num > 0:
A__ : List[Any] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 1 |