"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
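# Usage sketch (the script and folder names below are placeholders, not files shipped
# with this converter):
#
#   python convert_wavlm_original_pytorch_checkpoint.py \
#       --checkpoint_path WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-converted
#
# The dump folder can then be loaded through the standard from_pretrained API:
#
#   from transformers import WavLMModel
#   model = WavLMModel.from_pretrained("./wavlm-converted")
#   outputs = model(torch.randn(1, 16000))  # one second of dummy 16 kHz audio
#   print(outputs.last_hidden_state.shape)  # (1, num_frames, hidden_size)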
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Fit a linear model on (bias, date, match) features via the normal equation."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Forecast the next value with a seasonal ARIMA model (weekly seasonality)."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Predict the next value with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Compare each forecast against the actual value and take a majority vote."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
A : Dict = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
A : str = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
A : Any = Normalizer().fit_transform(data_input_df.values)
# split data
A : Optional[int] = normalize_df[:, 2].tolist()
A : Any = normalize_df[:, 0].tolist()
A : str = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
A : int = normalize_df[:, [1, 2]].tolist()
A : Any = x[: len(x) - 1]
A : Tuple = x[len(x) - 1 :]
# for linear regression & sarimax
A : Optional[int] = total_date[: len(total_date) - 1]
A : Optional[int] = total_user[: len(total_user) - 1]
A : str = total_match[: len(total_match) - 1]
A : Union[str, Any] = total_date[len(total_date) - 1 :]
A : List[str] = total_user[len(total_user) - 1 :]
A : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
A : int = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
A : int = '' if data_safety_checker(res_vote, tst_user) else 'not '
print('Today\'s data is {not_str}safe.')
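# Illustration of the voting rule with invented numbers: forecasts within 0.1 of the
# actual value (or above it) push the vote toward "safe".
if __name__ == "__main__":
    print(data_safety_checker([0.52, 0.48, 0.55], 0.5))  # True -> forecasts close to 0.5
    print(data_safety_checker([0.10, 0.20, 0.15], 0.5))  # False -> all far below 0.5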
def find_min(arr):
    """Return the minimum difference between the sums of the two halves of a partition of arr."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # carry over the previous row: a sum reachable without arr[i - 1] stays reachable
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
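# Quick sanity checks (illustrative inputs): {1, 6, 11, 5} splits best into
# {1, 5, 6} vs {11}, giving a difference of 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1
    print(find_min([3, 1, 4, 2, 2]))  # 0 -> {3, 2, 1} vs {4, 2}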
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
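# Note: the `converters` argument is plain pandas read_csv machinery, so the same hook
# works outside the Csv module ("ex.csv" below is a hypothetical file with a
# space-separated "int_list" column):
#
#   import pandas as pd
#   df = pd.read_csv("ex.csv", converters={"int_list": lambda x: [int(i) for i in x.split()]})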
"""Approximate the arc length of a curve y = fnc(x) by summing short straight segments."""
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
def A__ ( UpperCAmelCase_ ):
return math.sin(1_0 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
snake_case_ : List[Any] = 10
while i <= 100000:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
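# Sanity check: the segment sum is exact for a straight line, since every segment lies
# on the curve itself; the length of y = x from (0, 0) to (1, 1) is sqrt(2).
if __name__ == "__main__":
    assert abs(line_length(lambda x: x, 0, 1, 10) - math.hypot(1, 1)) < 1e-12
    print(line_length(lambda x: x, 0, 1, 10))  # 1.4142135623730951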
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
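# The effect of this pattern: importing the package is cheap, and a heavy submodule
# such as modeling_dpt is only loaded when one of its names is first touched. A
# stand-alone sketch of the same idea (module and attribute names are illustrative;
# this uses PEP 562 module-level __getattr__ instead of transformers' _LazyModule):
#
#   # lazy_pkg/__init__.py
#   import importlib
#
#   _import_structure = {"heavy_module": ["expensive_function"]}
#
#   def __getattr__(name):
#       for module_name, exported in _import_structure.items():
#           if name in exported:
#               return getattr(importlib.import_module(f".{module_name}", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")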
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _snake_case ( ) -> Generator[int, None, None]:
'''simple docstring'''
lowerCAmelCase_ :dict[int, int] = {}
lowerCAmelCase_ :int = 2
while True:
lowerCAmelCase_ :List[Any] = factor_map.pop(lowercase__ , lowercase__ )
if factor:
lowerCAmelCase_ :Optional[int] = factor + prime
while x in factor_map:
x += factor
lowerCAmelCase_ :List[str] = factor
else:
lowerCAmelCase_ :Optional[int] = prime
yield prime
prime += 1
def _snake_case ( lowercase__ : float = 1E10 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = sieve()
lowerCAmelCase_ :str = 1
while True:
lowerCAmelCase_ :int = next(lowercase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(lowercase__ )
n += 2
if __name__ == "__main__":
print(solution())
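# The incremental sieve is also useful on its own as an unbounded prime generator:
if __name__ == "__main__":
    from itertools import islice

    print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]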
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of both arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
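# Worked cases for both branches (illustrative values):
if __name__ == "__main__":
    print(median_of_two_arrays([1.0, 3.0], [2.0, 4.0]))  # 2.5 -> even total length, mean of 2.0 and 3.0
    print(median_of_two_arrays([1.0, 2.0], [3.0]))  # 2.0 -> odd total length, single middle element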
"""Tests for the Karras VE unconditional image-generation pipeline."""
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D")
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to record the vertices of graph in order of finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth-first search to collect every vertex reachable from vert."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS finishing order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
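# Kosaraju's algorithm on the two test graphs above; the cycle 0 -> 2 -> 1 -> 0
# collapses into a single component.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]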
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : Dict = image_size
__lowerCAmelCase : Union[str, Any] = num_channels
__lowerCAmelCase : List[Any] = embeddings_size
__lowerCAmelCase : Tuple = hidden_sizes
__lowerCAmelCase : Dict = depths
__lowerCAmelCase : int = is_training
__lowerCAmelCase : Any = use_labels
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Any = num_labels
__lowerCAmelCase : int = scope
__lowerCAmelCase : List[Any] = len(_SCREAMING_SNAKE_CASE )
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset([])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a matrix into four equal quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively compute the product of two matrices with Strassen's seven multiplications."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
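# Cross-check against a naive O(n^3) multiplication on a small illustrative example.
# strassen() pads its inputs in place, so the reference result is computed first.
if __name__ == "__main__":

    def naive_multiply(a, b):
        return [
            [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
            for i in range(len(a))
        ]

    m1 = [[1, 2], [3, 4], [5, 6]]
    m2 = [[7, 8, 9], [10, 11, 12]]
    expected = naive_multiply(m1, m2)
    assert strassen(m1, m2) == expected
    print("strassen() matches the naive multiplication")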
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCAmelCase_ ( _A , unittest.TestCase ):
'''simple docstring'''
a__ = MvpTokenizer
a__ = MvpTokenizerFast
a__ = True
a__ = filter_roberta_detectors
def _lowercase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
__magic_name__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__magic_name__ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__magic_name__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__magic_name__ = {"""unk_token""": """<unk>"""}
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase__ ) )
def _lowercase ( self : Union[str, Any] , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self : Optional[int] , **UpperCamelCase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self : Any , UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def _lowercase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def _lowercase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__magic_name__ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__magic_name__ = tokenizer(UpperCamelCase__ , max_length=len(UpperCamelCase__ ) , padding=UpperCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__magic_name__ = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# Test that special tokens are reset
@require_torch
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__magic_name__ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , UpperCamelCase__ )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertNotIn("""labels""" , UpperCamelCase__ )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ )
@require_torch
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
__magic_name__ = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__magic_name__ = tokenizer(text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__magic_name__ = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def _lowercase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__magic_name__ = ["""A long paragraph for summarization."""]
__magic_name__ = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__magic_name__ = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ , return_tensors="""pt""" )
__magic_name__ = inputs["""input_ids"""]
__magic_name__ = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # The Rust tokenizer correctly handles the space before the mask while the Python one does not
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None,):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
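    # Worked example (illustrative numbers, not part of the original module):
    # with initial_learning_rate=1e-3, warmup_steps=1000 and power=1.0, step 100
    # falls into the warmup branch and yields 100/1000 * 1e-3 = 1e-4; past step
    # 1000 the wrapped `decay_schedule_fn` takes over.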
    def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None,):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
        )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
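# Minimal usage sketch for the factory above (the keyword values are
# illustrative, taken from a typical fine-tuning setup, not from this file):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=3e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
# )
# The schedule is returned separately so the learning rate can be logged
# independently of the optimizer, as the comment above notes.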
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs,):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
@classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
return config
    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
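# Minimal usage sketch for GradientAccumulator (illustrative; `model`, `loss_fn`,
# `batches` and `optimizer` are placeholders, not names from this file):
# accumulator = GradientAccumulator()
# for micro_batch in batches:
#     with tf.GradientTape() as tape:
#         loss = loss_fn(model(micro_batch))
#     accumulator(tape.gradient(loss, model.trainable_variables))
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()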
def harmonic_series(n_term: str) -> list:
    """Build the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
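# Illustrative doctest-style example for the function above:
# >>> harmonic_series("4")
# ['1', '1/2', '1/3', '1/4']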
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the sequence length equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCAmelCase_ : Union[str, Any] = """<<<<<<< This should probably be modified because it mentions: """
UpperCAmelCase_ : Optional[Any] = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Factory used by the CLI to instantiate the command from parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
'''simple docstring'''
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
'''simple docstring'''
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    matches = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(matches) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                        tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                        out_line = "from . import " + match.group(1)
                    # Check we have not forgotten anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py", "")
                dataset_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(dataset_dir, f_name)
                os.makedirs(dataset_dir, exist_ok=True)
                self._logger.info(f"Adding directory {dataset_dir}")
                imports_to_builder_map.update({imp: dataset_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.')
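# Example invocation (illustrative paths; the subcommand itself is registered on
# the `datasets-cli` entry point by `register_subcommand` above):
# datasets-cli convert --tfds_path ./tfds/my_dataset.py --datasets_directory ./hf_datasets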
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False,):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=opset,
    )
del pipeline.text_encoder
# UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=opset, use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False,
    )
del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )
del pipeline.vae
# SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(
                    1, clip_num_channels, clip_image_size, clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
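# Example invocation (illustrative script name and paths, not taken from this file):
# python convert_stable_diffusion_checkpoint_to_onnx.py \
#     --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14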
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs,):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
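# Minimal usage sketch (illustrative; `head_dim` follows from the property above,
# since 4544 // 71 == 64, and `rotary` is True whenever `alibi` is False):
# config = FalconConfig()
# assert config.head_dim == 64 and config.rotary is True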
'''simple docstring'''
import numpy as np
class Cell:
    """A single grid cell with A* bookkeeping (g, h, f costs and a parent link)."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds neighbours (8-connectivity) of `cell`."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from `start` to `goal`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
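# Worked example of the heuristic used above (squared Euclidean distance):
# stepping from n.position == (1, 1) toward goal.position == (4, 4) gives
# h = (4 - 1) ** 2 + (4 - 1) ** 2 = 18.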
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Mark the found path on the grid, just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , **_UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_UpperCAmelCase , return_tensors="""np""" )
lowercase__ = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = processor(text=_UpperCAmelCase )
lowercase__ = tokenizer(_UpperCAmelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 305
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested size down to the nearest multiple the movq decoder can produce."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
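
# Worked example: with the default scale_factor of 8, a requested 768x768 image
# gives 768 // 8**2 = 12 blocks per side with no remainder, so the function
# returns (96, 96) -- the latent grid size the movq decoder upscales to 768x768.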
class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder pipeline for Kandinsky 2.2 text-to-image generation."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')

        device = torch.device(f'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')

        device = torch.device(f'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 94
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Dummy tool used by the interpreter tests below."""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 305
| 0
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    """Build the default attention and head masks for LED test inputs."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    """Wrap a token list into an int32 TF constant."""
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 95
|
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct so the MST is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given set of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka(graph):
        """Builds the minimum spanning tree with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
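
# Minimal usage sketch (hypothetical edge list, not from the original sample):
#
#   g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (1, 2, 3)])
#   g.distinct_weight()
#   mst = Graph.boruvka(g)
#   print(mst)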
| 305
| 0
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks peak resident CPU memory via a polling thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (deltas in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
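
# Hypothetical usage sketch (the workload call is a placeholder, not part of
# the original module):
#
#   start_measures = start_measure()
#   run_workload()
#   measures = end_measure(start_measures)
#   log_measures(measures, "workload")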
| 96
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
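
# A minimal companion sketch using the same tuple-counting idiom (an addition,
# not part of the original sample): AND is 1 only when neither input is 0.
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)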
| 305
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 97
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A : Any = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the old ProphetNet weights into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A : str = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 305
| 0
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    """Configuration for the text tower of ALIGN (a BERT-style encoder)."""

    model_type = "align_text_model"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('model_type') == "align":
            config_dict = config_dict['text_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    """Configuration for the vision tower of ALIGN (an EfficientNet-style encoder)."""

    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2_560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('model_type') == "align":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    """Composite configuration tying the ALIGN text and vision towers together."""

    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the AlignTextConfig with default values.')

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.')

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 98
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph BPE tokenizer for GPT-2, built on keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 305
| 0
|
def is_automorphic_number(number: int) -> bool:
    """Check whether `number` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
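    # Quick examples: 5**2 = 25 and 6**2 = 36 end in the original number,
    # while 7**2 = 49 does not.
    assert is_automorphic_number(5)
    assert is_automorphic_number(6)
    assert not is_automorphic_number(7)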
| 99
|
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

# sieve out the composites up to NUM_PRIMES
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all prime partitions of `number_to_partition`."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first value that can be written as a sum of primes in over
    `number_unique_partitions` unique ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
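
# Worked example (from the Project Euler 77 statement): 10 can be written as a
# sum of primes in exactly five ways, and each distinct multiset of primes has
# a distinct product, so len(partition(10)) == 5.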
| 305
| 0
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ = 100 ):
__SCREAMING_SNAKE_CASE = set()
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = n + 1 # maximum limit
for a in range(2 , UpperCamelCase_ ):
for b in range(2 , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = a**b # calculates the current power
collect_powers.add(UpperCamelCase_ ) # adds the result to the set
return len(UpperCamelCase_ )
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 100
|
def longest_distance(graph):
    """Print the length (in vertices) of the longest path in a DAG."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
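
# Note: the queue-based pass above is Kahn's topological sort; long_dist[v]
# holds the number of vertices on the longest path ending at v, so this DAG
# prints 5 (one longest path is 0 -> 2 -> 5 -> 6 -> 7).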
| 305
| 0
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')

        model_id = '''xvjiarui/stable-diffusion-2-inpainting'''
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 101
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
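# This script is meant to be launched on several processes at once so the
# collective ops below actually communicate, e.g. (illustrative):
#   accelerate launch --num_processes 2 test_ops.py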
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # the main process holds one extra element, so the others must be padded
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # for now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # for now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # for xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
| 305
| 0
|
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left cell to the bottom-right cell,
    moving one step up/down/left/right and never entering a cell that holds 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
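# Worked example: a 2x2 grid of zeros has exactly two simple paths from
# (0, 0) to (1, 1), so depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2.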
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102
|
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (A, B, ..., Z, AA, AB, ...) into
    its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
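# e.g. excel_title_to_column("AB") == 28, since (1 * 26) + 2 = 28.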
if __name__ == "__main__":
from doctest import testmod
testmod()
| 305
| 0
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, '''rb''' ) as fp:
            corpus = pickle.load(fp, encoding='''latin1''' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""" )
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""" )
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
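# Example invocation (hypothetical paths, for illustration only):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./transfo-xl-tf/model.ckpt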
| 103
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Ordinary least squares on the features [1, date, match count]."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """One-step-ahead SARIMAX forecast with the match count as an exogenous variable."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="""nm""" )
    result = model_fit.predict(1, len(test_match), exog=[test_match] )
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel SVR fitted on the (match, date) feature pairs."""
    regressor = SVR(kernel="""rbf""", C=1, gamma=0.1, epsilon=0.1 )
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower limit derived from the interquartile range of the data."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote on whether today's value is in line with the forecasts: a vote is
    safe when it does not overshoot and lands within 0.1 of the actual value."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
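# e.g. data_safety_checker([2.0, 2.05, 5.0], 2.1) is True: two of the three
# forecasts land within 0.1 of the actual value and only one overshoots.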
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data (the held-out last value)
    not_str = '' if data_safety_checker(res_vote, tst_user[0]) else 'not '
    print(f'Today\'s data is {not_str}safe.')
| 305
| 0
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    """Test cases for knapsack.greedy_knapsack."""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    # NOTE: the assertRaisesRegex calls below only register the expected error
    # and message; they never actually invoke kp.calc_profit.
    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''' )

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, '''Weight can not be negative.''' )

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, '''Profit can not be negative.''' )

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, '''max_weight must greater than zero.''' )

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 104
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / """file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename, """w""" ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / """malformed_file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename, """w""" ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / """csv_with_image.csv"""
    data = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''' )
    with open(filename, """w""" ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / """csv_with_label.csv"""
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename, """w""" ) as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / """csv_with_int_list.csv"""
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename, """w""" ) as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError, match="""Error tokenizing data""" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == """ERROR"""
        and """Failed to read file""" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="""utf-8""" ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="""utf-8""", features=Features({"""image""": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""image""" ).type == Image()()
    generated_content = pa_table.to_pydict()["""image"""]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="""utf-8""" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="""utf-8""", features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
    generated_content = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="""utf-8""", sep=""",""", converters={"""int_list""": lambda x: [int(i) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
    generated_content = pa_table.to_pydict()["""int_list"""]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 305
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1_514_745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49_947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05_652_401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_383_808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04_003_906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_373_516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16)
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04_003_906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_373_516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1, )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045_043_945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_347_693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
| 105
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
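# Lazy-import pattern used across transformers: the symbols listed in
# _import_structure are only materialized by the _LazyModule at the bottom of
# the file when first accessed, keeping the top-level import cheap.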
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 305
| 0
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 106
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
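# e.g. median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0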
if __name__ == "__main__":
import doctest
doctest.testmod()
array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_1, array_2)}')
| 305
| 0
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
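# Odd-even transposition sort: one process per list element; in alternating
# phases each process compare-exchanges its value with its left or right
# neighbour over a Pipe, so after len(arr) phases the list is sorted.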
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    '''Compare-exchange worker: one process per element of the list.'''
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 107
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search from ``vert``, appending each vertex after all of its
    descendants (reverse post-order)."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from ``vert`` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS order on the graph, then DFS on the reversed
    graph in reverse finishing order."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
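# e.g. strongly_connected_components(test_graph_1) yields the components
# {0, 1, 2}, {3} and {4} (the exact list order depends on the DFS traversal).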
| 305
| 0
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    '''Validate that the device_map covers each attention block exactly once.'''
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks) )


def get_device_map(n_layers, devices):
    '''Returns a dictionary of layers distributed evenly across all devices.'''
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
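# Quick sanity check:
#   get_device_map(12, [0, 1]) == {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}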
| 108
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
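# DiffEdit works in three stages, and the tests below mirror them:
#   1. generate_mask() - diff noise predictions for source/target prompts to get an edit mask
#   2. invert()        - DDIM/DPM inversion of the input image into latents
#   3. __call__()      - denoise the inverted latents, editing only inside the mask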
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f'''`{optional_component}` did not stay set to None after loading.''', )
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        raw_image = raw_image.convert("""RGB""" ).resize((768, 768) )
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""", safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="""numpy""", ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""", safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="""numpy""", ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 305
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = 'perceiver'

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""inputs""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [""" """.join(["""a"""] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("""input_ids""" )
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width )
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("""pixel_values""" )
            return inputs
        else:
            raise ValueError(
                """Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 109
|
from __future__ import annotations
import math
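# Strassen's algorithm computes each block product of two matrices with 7
# recursive multiplications instead of 8, giving O(n^log2(7)) ~ O(n^2.807)
# work rather than the naive O(n^3).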
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("""Matrices are not 2x2""" )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a) )
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a) )
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""" )
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0] )


def print_matrix(matrix: list) -> None:
    print("""\n""".join(str(line) for line in matrix ) )
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's algorithm."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # the seven Strassen products, each a single recursive multiplication
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of arbitrary compatible dimensions via Strassen's algorithm."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
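# Demo: multiply a 10x4 matrix by a 4x4 matrix. Both operands are zero-padded to
# 16x16 (the next power of two) internally and the padding is removed again
# before the product is printed.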
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : List[Any] = self.get_rust_tokenizer()
snake_case_ : str = self.get_image_processor()
snake_case_ : Tuple = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
snake_case_ : Tuple = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ : List[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
    def test_save_load_pretrained_additional_features(self):
snake_case_ : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
snake_case_ : Optional[int] = self.get_image_processor(do_normalize=_UpperCAmelCase )
snake_case_ : Union[str, Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
    def test_image_processor(self):
snake_case_ : Optional[int] = self.get_image_processor()
snake_case_ : Any = self.get_tokenizer()
snake_case_ : Union[str, Any] = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
snake_case_ : int = self.prepare_image_inputs()
snake_case_ : Any = image_processor(_UpperCAmelCase , return_tensors='''np''' )
snake_case_ : List[str] = processor(images=_UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self):
snake_case_ : Union[str, Any] = self.get_image_processor()
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : str = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
snake_case_ : int = '''lower newer'''
snake_case_ : Optional[Any] = processor(text=_UpperCAmelCase , return_tensors='''np''' )
snake_case_ : Tuple = tokenizer(_UpperCAmelCase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def test_processor(self):
snake_case_ : str = self.get_image_processor()
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : List[Any] = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
snake_case_ : Dict = '''lower newer'''
snake_case_ : List[str] = self.prepare_image_inputs()
snake_case_ : str = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
    def test_processor_with_text_list(self):
snake_case_ : Dict = '''google/owlvit-base-patch32'''
snake_case_ : List[str] = OwlViTProcessor.from_pretrained(_UpperCAmelCase )
snake_case_ : Any = ['''cat''', '''nasa badge''']
snake_case_ : Dict = processor(text=_UpperCAmelCase )
snake_case_ : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
    def test_processor_with_nested_text_list(self):
snake_case_ : List[Any] = '''google/owlvit-base-patch32'''
snake_case_ : List[Any] = OwlViTProcessor.from_pretrained(_UpperCAmelCase )
snake_case_ : Tuple = [['''cat''', '''nasa badge'''], ['''person''']]
snake_case_ : Dict = processor(text=_UpperCAmelCase )
snake_case_ : int = 16
snake_case_ : List[str] = len(_UpperCAmelCase )
snake_case_ : Optional[Any] = max([len(_UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
    def test_processor_case(self):
snake_case_ : Any = '''google/owlvit-base-patch32'''
snake_case_ : Optional[int] = OwlViTProcessor.from_pretrained(_UpperCAmelCase )
snake_case_ : Dict = ['''cat''', '''nasa badge''']
snake_case_ : int = processor(text=_UpperCAmelCase )
snake_case_ : Optional[int] = 16
snake_case_ : Optional[Any] = inputs['''input_ids''']
snake_case_ : Any = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_with_query_images(self):
snake_case_ : Optional[Any] = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Optional[Any] = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
snake_case_ : Any = self.prepare_image_inputs()
snake_case_ : List[Any] = self.prepare_image_inputs()
snake_case_ : str = processor(images=_UpperCAmelCase , query_images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
    def test_tokenizer_decode(self):
snake_case_ : List[str] = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : str = OwlViTProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
snake_case_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : str = processor.batch_decode(_UpperCAmelCase )
snake_case_ : List[str] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : List[str]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=4 , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
    def prepare_config_and_inputs(self):
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
'''simple docstring'''
A__ = True
A__ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps a text-model config and adds the multimodal-specific settings."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series 1, 1/2, 1/3, ..., 1/n as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
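# Example: harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']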
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Build default model inputs, deriving the attention mask from the padding tokens."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
"""simple docstring"""
snake_case = OPTConfig
snake_case = {}
snake_case = '''gelu'''
def __init__( self : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any=13 , __UpperCAmelCase : Union[str, Any]=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=99 , __UpperCAmelCase : str=16 , __UpperCAmelCase : int=2 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Dict=20 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : Tuple=16 , ):
'''simple docstring'''
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = eos_token_id
_A = pad_token_id
_A = bos_token_id
_A = embed_dim
_A = word_embed_proj_dim
_A = False
    def prepare_config_and_inputs_for_common(self):
'''simple docstring'''
_A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_A = tf.concat([input_ids, eos_tensor] , axis=1 )
_A = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_UpperCAmelCase , **self.config_updates , )
_A = prepare_opt_inputs_dict(_UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
'''simple docstring'''
_A = TFOPTModel(config=_UpperCAmelCase )
_A = inputs_dict["input_ids"]
_A = input_ids[:1, :]
_A = inputs_dict["attention_mask"][:1, :]
_A = 1
# first forward pass
_A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
_A , _A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_A = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_A = tf.concat([input_ids, next_tokens] , axis=-1 )
_A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
_A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_A = output_from_no_past[:, -3:, random_slice_idx]
_A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
snake_case = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
snake_case = (TFOPTForCausalLM,) if is_tf_available() else ()
snake_case = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ):
if hasattr(_UpperCAmelCase , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_UpperCAmelCase , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_A = model_class(config=_UpperCAmelCase )
_A = _get_word_embedding_weight(_UpperCAmelCase , model.get_input_embeddings() )
_A = _get_word_embedding_weight(_UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_UpperCAmelCase )
_A = _get_word_embedding_weight(_UpperCAmelCase , model.get_input_embeddings() )
_A = _get_word_embedding_weight(_UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_A = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _UpperCAmelCase )
# check that weights remain the same after resizing
_A = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_A = False
self.assertTrue(_UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _UpperCAmelCase )
_A = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_A = False
self.assertTrue(_UpperCAmelCase )
def _long_tensor(tok_lst):
    """Convert a list of token-id lists into an int32 tf constant."""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_A = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_A = input_ids.shape[0]
_A = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head(self):
'''simple docstring'''
_A = TFOPTModel.from_pretrained("facebook/opt-350m" )
_A = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_A = tf.not_equal(_UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
_A = model(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ).last_hidden_state
_A = (1, 11, 512)
self.assertEqual(output.shape , _UpperCAmelCase )
_A = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=4E-3 ) )
_A = tf.function(_UpperCAmelCase , jit_compile=_UpperCAmelCase )
_A = xla_generate(_UpperCAmelCase , _UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=4E-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
'''simple docstring'''
_A = TFOPTForCausalLM.from_pretrained(self.path_model )
        _A = GPT2Tokenizer.from_pretrained(self.path_model )
_A = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_A = tokenizer(_UpperCAmelCase , return_tensors="tf" , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_A = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_A = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4 ) )
_A = tf.function(_UpperCAmelCase , jit_compile=_UpperCAmelCase )
_A = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
    def prompts(self):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self):
'''simple docstring'''
_A = "facebook/opt-125m"
_A = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_A = []
        _A = GPT2Tokenizer.from_pretrained(_UpperCAmelCase )
_A = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase )
for prompt in self.prompts:
_A = tokenizer(_UpperCAmelCase , return_tensors="tf" ).input_ids
_A = model.generate(_UpperCAmelCase , max_length=10 )
_A = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
    def test_batch_generation(self):
'''simple docstring'''
_A = "facebook/opt-350m"
        _A = GPT2Tokenizer.from_pretrained(_UpperCAmelCase )
_A = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase )
_A = "left"
# use different length sentences to test batching
_A = [
"Hello, my dog is a little",
"Today, I",
]
_A = tokenizer(_UpperCAmelCase , return_tensors="tf" , padding=_UpperCAmelCase )
_A = inputs["input_ids"]
_A = model.generate(input_ids=_UpperCAmelCase , attention_mask=inputs["attention_mask"] )
_A = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_A = model.generate(input_ids=_UpperCAmelCase )
_A = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
_A = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_A = model.generate(input_ids=_UpperCAmelCase , max_length=model.config.max_length - num_paddings )
_A = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCAmelCase )
_A = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm(self):
'''simple docstring'''
_A = "facebook/opt-350m"
_A = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_A = []
        _A = GPT2Tokenizer.from_pretrained(_UpperCAmelCase )
_A = TFOPTForCausalLM.from_pretrained(_UpperCAmelCase )
for prompt in self.prompts:
_A = tokenizer(_UpperCAmelCase , return_tensors="tf" ).input_ids
_A = model.generate(_UpperCAmelCase , max_length=10 )
_A = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
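# Fast pipeline tests built from tiny dummy components, followed by a slow GPU
# integration test that compares a generated render against a reference image.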
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def renderer_dim(self):
"""simple docstring"""
return 8
@property
    def dummy_image_encoder(self):
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(_UpperCAmelCase )
return model
@property
    def dummy_image_processor(self):
"""simple docstring"""
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
    def dummy_prior(self):
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
lowercase__ = PriorTransformer(**_UpperCAmelCase )
return model
@property
    def dummy_renderer(self):
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**_UpperCAmelCase )
return model
    def get_dummy_components(self):
"""simple docstring"""
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
lowercase__ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
    def test_shap_e_img2img(self):
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
"""simple docstring"""
lowercase__ = torch_device == """cpu"""
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
    def test_num_images_per_prompt(self):
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
lowercase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
lowercase__ = pipe(
_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
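# _distribute_shards splits ``num_shards`` shards into at most ``max_num_jobs``
# contiguous ranges; _split_gen_kwargs and _number_of_shards_in_gen_kwargs apply
# the same sharding logic to generator kwargs that carry lists of shards.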
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 1_0, """max_num_jobs""": 1}, [range(1_0 )]),
({"""num_shards""": 1_0, """max_num_jobs""": 1_0}, [range(lowercase__ , i + 1 ) for i in range(1_0 )]),
({"""num_shards""": 1, """max_num_jobs""": 1_0}, [range(1 )]),
({"""num_shards""": 1_0, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]),
({"""num_shards""": 3, """max_num_jobs""": 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case ( lowercase__ : Any , lowercase__ : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Dict = _distribute_shards(**lowercase__ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 1_0, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Any = _split_gen_kwargs(lowercase__ , lowercase__ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case ( lowercase__ : str , lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(lowercase__ ):
_number_of_shards_in_gen_kwargs(lowercase__ )
else:
lowerCAmelCase_ :List[str] = _number_of_shards_in_gen_kwargs(lowercase__ )
assert out == expected
import requests
from bs4 import BeautifulSoup
def UpperCamelCase ( __magic_name__ : str = "AAPL" ) -> str:
"""simple docstring"""
lowercase__ = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
lowercase__ = BeautifulSoup(requests.get(__magic_name__ ).text , """html.parser""" )
lowercase__ = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
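# Note: the CSS class above matches the price <span> in Yahoo Finance's current
# markup, so the scraper will break if Yahoo changes the page layout.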
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
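# Convert UperNet + ConvNeXt semantic-segmentation checkpoints released with
# mmsegmentation to the Hugging Face Transformers format, then sanity-check the
# logits on a sample ADE20k image before saving or pushing to the hub.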
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[F'upernet-convnext-{size}' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self) -> int:
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self) -> bool:
        return not self.alibi
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
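# OwlViTProcessor wraps an OwlViT image processor and a CLIP tokenizer behind a
# single __call__ that accepts text queries, query images, and/or target images.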
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def UpperCamelCase__ ( self , *A_ , **A_ ) ->List[str]:
'''simple docstring'''
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , *A_ , **A_ ) ->Any:
'''simple docstring'''
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , *A_ , **A_ ) ->str:
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , *A_ , **A_ ) ->List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , *A_ , **A_ ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _UpperCAmelCase , )
return self.image_processor
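

# --- Hedged usage sketch (not part of the original module) ---
# Shows the query-padding behaviour of `__call__`: ragged per-image text query
# lists are padded to the longest list before batching. The checkpoint name is
# illustrative and assumes network access.
if __name__ == "__main__":
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (768, 768))
    inputs = processor(
        text=[["a photo of a cat", "a photo of a dog"], ["a remote control"]],
        images=[image, image],
        return_tensors="np",
    )
    # Two images x two (padded) queries each -> four tokenized queries in total
    print(inputs["input_ids"].shape, inputs["pixel_values"].shape)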
| 275
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to pass to the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 305
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 335
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 305
| 0
|
"""simple docstring"""
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
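# Note (added): `long_dist` counts vertices, not edges. For the DAG above the
# longest path is 0 -> 2 -> 5 -> 6 -> 7, so the call prints 5.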
| 72
|
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct (a precondition of Borůvka's algorithm)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) triples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set (union-find) with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Borůvka's algorithm: returns a minimum spanning tree of `graph`."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
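

# --- Hedged usage sketch (not part of the original file) ---
# Build a small weighted graph and extract its minimum spanning tree with the
# Borůvka routine above. `distinct_weight()` is called first because the
# algorithm assumes all edge weights are distinct.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[[1, 2, 1], [2, 3, 2], [3, 4, 1], [4, 1, 3]],
    )
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)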
| 305
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 235
|
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR: returns 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively checks the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 305
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
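

# --- Hedged illustration (not part of the original module) ---
# Like RoBERTa, CamemBERT packs a sentence pair as `<s> A </s></s> B </s>` and
# gives every position token type id 0, which is exactly what the two helpers
# above implement. Loading the checkpoint assumes network access.
if __name__ == "__main__":
    tok = CamembertTokenizerFast.from_pretrained("camembert-base")
    print(tok.build_inputs_with_special_tokens([100, 101], [200]))  # [cls, 100, 101, sep, sep, 200, sep]
    print(tok.create_token_type_ids_from_sequences([100, 101], [200]))  # [0, 0, 0, 0, 0, 0, 0]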
| 112
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the old ProphetNet weights into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # fairseq stores query/key/value as one fused projection; split it in three
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
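

# --- Hedged standalone sketch (not part of the original script) ---
# The `special_keys` branch above splits fairseq's fused attention projection
# `in_proj_weight`, laid out as [query; key; value] along dim 0, into three
# separate projections. `split_in_proj` is a hypothetical helper that isolates
# just that slicing logic.
def split_in_proj(in_proj_weight, in_proj_bias):
    embed_dim = in_proj_weight.shape[0] // 3
    query = (in_proj_weight[:embed_dim, :], in_proj_bias[:embed_dim])
    key = (in_proj_weight[embed_dim : 2 * embed_dim, :], in_proj_bias[embed_dim : 2 * embed_dim])
    value = (in_proj_weight[2 * embed_dim :, :], in_proj_bias[2 * embed_dim :])
    return query, key, value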
| 305
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,  # 288 + 32
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 327
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Creates a TFGPT2Tokenizer from an existing `GPT2Tokenizer`."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Creates a TFGPT2Tokenizer from a pretrained GPT-2 tokenizer."""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Creates a TFGPT2Tokenizer from its Keras config."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 305
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : List[str] = KandinskyVaaInpaintPipeline
_lowerCAmelCase : Optional[int] = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_lowerCAmelCase : Tuple = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_lowerCAmelCase : Dict = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowerCAmelCase : int = False
@property
def _snake_case ( self : Tuple ):
return 32
@property
def _snake_case ( self : List[str] ):
return 32
@property
def _snake_case ( self : Dict ):
return self.time_input_dim
@property
def _snake_case ( self : List[str] ):
return self.time_input_dim * 4
@property
def _snake_case ( self : Optional[int] ):
return 100
@property
def _snake_case ( self : Dict ):
torch.manual_seed(0 )
snake_case_ : str = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case_ : Tuple = UNetaDConditionModel(**_UpperCAmelCase )
return model
@property
def _snake_case ( self : int ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self : Tuple ):
torch.manual_seed(0 )
snake_case_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self : int ):
snake_case_ : Dict = self.dummy_unet
snake_case_ : Any = self.dummy_movq
snake_case_ : int = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_UpperCAmelCase , )
snake_case_ : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _snake_case ( self : Optional[int] , lowercase_ : int , lowercase_ : List[Any]=0 ):
snake_case_ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case_ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCAmelCase )
# create init_image
snake_case_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Tuple = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
snake_case_ : Any = np.ones((64, 64) , dtype=np.floataa )
snake_case_ : Dict = 0
if str(_UpperCAmelCase ).startswith('''mps''' ):
snake_case_ : List[Any] = torch.manual_seed(_UpperCAmelCase )
else:
snake_case_ : Union[str, Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
snake_case_ : List[str] = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def _snake_case ( self : Optional[int] ):
snake_case_ : Any = '''cpu'''
snake_case_ : Union[str, Any] = self.get_dummy_components()
snake_case_ : Any = self.pipeline_class(**_UpperCAmelCase )
snake_case_ : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ : int = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
snake_case_ : Tuple = output.images
snake_case_ : Tuple = pipe(
**self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Any = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
snake_case_ : Any = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _snake_case ( self : int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Any ):
snake_case_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
snake_case_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
snake_case_ : int = np.ones((768, 768) , dtype=np.floataa )
snake_case_ : Optional[int] = 0
snake_case_ : Optional[Any] = '''a hat'''
snake_case_ : int = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCAmelCase )
snake_case_ : Tuple = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa )
snake_case_ : Tuple = pipeline.to(_UpperCAmelCase )
pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case_, snake_case_ : Tuple = pipe_prior(
_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
snake_case_ : Optional[int] = pipeline(
image=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
snake_case_ : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 264
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of integers corresponding to unique prime partitions of
    `number_to_partition`: each partition is represented by the product of its
    parts, e.g. (7+3) <-> 7*3 = 21 and (3+3+2+2) <-> 3*3*2*2 = 36.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest integer that can be written as the sum of primes in
    more than `number_unique_partitions` unique ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
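# Worked example (added): a product determines its parts uniquely by prime
# factorisation, so counting distinct products counts distinct partitions.
# For n = 10 the prime partitions 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7 and 5+5 map
# to {32, 36, 30, 21, 25}, so len(partition(10)) == 5. The first n with more
# than 5000 prime partitions -- the value solution() returns -- is 71.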
| 305
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Removes duplicate entries from the model doc ToC and sorts it alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 80
|
def longest_distance(graph):
    """Print the number of vertices on the longest path in a DAG (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 305
| 0
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in the list. Returns -1 if the element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
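
# Note (added): each step either discards roughly a third of the range on one
# side or shrinks it to the middle third, so both versions make O(log n)
# comparisons (the recurrence is T(n) = T(2n/3) + O(1)); below the `precision`
# threshold they fall back to the linear scan.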
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = input('''Enter numbers separated by comma:\n''').strip()
lowerCamelCase_ = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
lowerCamelCase_ = int(input('''Enter the number to be found in the list:\n''').strip())
lowerCamelCase_ = ite_ternary_search(collection, target)
lowerCamelCase_ = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F"""Iterative search: {target} found at positions: {resulta}""")
print(F"""Recursive search: {target} found at positions: {resulta}""")
else:
print('''Not found''')
| 79
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process gets one extra element so that padding is actually exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
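# To exercise these collective ops across several processes, launch the file via
# the accelerate CLI (illustrative command; the script name is a placeholder):
#   accelerate launch --num_processes 2 test_ops.py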
| 305
| 0
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script tracking model performance.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
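# Illustrative launch command (the DeepSpeed config path and script name are
# placeholders):
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --performance_lower_bound 0.8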
| 84
|
def excel_title_to_column(column_title: str) -> int:
    """
    Given a string `column_title` that represents a column title in an Excel
    sheet, return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
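    # Worked example (illustrative): for "AB", the loop computes
    # (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28.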
| 305
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
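# Usage sketch (illustrative, not part of the original module); since the file
# uses package-relative imports, run it through the installed package:
#   from transformers import TableTransformerConfig
#   config = TableTransformerConfig()
#   assert config.hidden_size == config.d_model == 256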
| 270
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """
    First method: linear regression on (date, match) -> total user.
    Note: the prediction term uses `test_mtch[0] * beta[2]`; the coefficient
    must multiply the regressor, not be added to it.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """
    Second method: SARIMAX, a statistical method that learns the pattern of
    previous inputs to predict future data.
    """
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """
    Third method: Support Vector Regressor with an RBF kernel.
    """
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """
    Optional helper: compute the lower limit for outlier detection from the
    interquartile range of the training data.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """
    Compare the forecast votes with the actual result: a vote is "safe" if it
    does not exceed the actual result and differs from it by at most 0.1.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
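    # Optional outlier guard (illustrative sketch, not part of the voting flow):
    # values below this limit would be treated as outliers.
    low_lim = interquartile_range_checker(trn_user)
    print(f"Lower limit for outliers: {low_lim}")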
| 305
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
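# Usage sketch (illustrative, not part of the original module):
#   from transformers import InformerConfig
#   config = InformerConfig(prediction_length=24)
#   # with the defaults (lags [1..7], input_size 1), feature_size == 1 * 7 + 2 == 9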
| 275
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
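# Illustrative invocation (the test-suite path is a placeholder):
#   pytest tests/packaged_modules/test_csv.py -q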
| 305
| 0
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
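# Illustrative invocation (paths and file names are placeholders; see the
# distillation README for the full recipe):
#   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --dump_path serialization_dir/my_first_training --force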
| 335
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task: a 1D tensor
    of 0s and 1s, where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # Apply the transforms and create a corresponding mask indicating which patches to mask.
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
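# Illustrative launch (assumed flags; see examples/pytorch/image-pretraining):
#   python run_mim.py --model_type vit --output_dir ./simmim-outputs --do_train --do_eval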
| 72
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Find the median of the combined, sorted contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 305
| 0
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from __future__ import annotations

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search from ``vert``; return the vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from ``vert`` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def kosaraju(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of ``graph`` (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
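

# A minimal usage sketch (not in the original file): each inner list printed
# below is one strongly connected component of the corresponding test digraph.
if __name__ == "__main__":
    print(kosaraju(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(kosaraju(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]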
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
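

# Example of wiring the mixin into a concrete test case (class and attribute
# values here are illustrative, not from the original file):
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = Wav2Vec2FeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}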
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        # Note: the boolean flag values below were placeholders in the source and
        # are restored here to the standard SD2-style test configuration.
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
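

# The slow tests above exercise DiffEdit's three-step API, summarized here as
# a sketch (checkpoint name as in the tests; variable names illustrative):
#
#   pipe = StableDiffusionDiffEditPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
#   mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#   latents = pipe.invert(prompt=src, image=img).latents
#   result = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]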
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process the flax parameter keys to match the PyTorch naming."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
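

# Example invocation of this script (the script filename and paths here are
# illustrative, not from the original file):
#
#   python convert_big_switch.py \
#       --switch_t5x_checkpoint_path /path/to/checkpoint_634600 \
#       --pytorch_dump_folder_path /path/to/output \
#       --max_shard_size 10GB \
#       --dtype bfloat16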
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally sized matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally sized matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (number of rows, number of columns)."""
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    """Print the matrix one row per line."""
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two power-of-two square matrices via Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of compatible dimensions with Strassen's algorithm."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # (kept from the source) two square operands are returned unchanged here
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maxim = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maxim))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
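

# Optional sanity check (a sketch, not part of the original file): on a
# non-square pair, Strassen's result should equal a naive O(n^3) product.
# ``strassen`` pads its arguments in place, so it receives throwaway copies.
if __name__ == "__main__":
    from copy import deepcopy

    def naive_multiply(a: list, b: list) -> list:
        return [[sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))] for i in range(len(a))]

    sample_a = [[1, 2, 3], [4, 5, 6]]
    sample_b = [[7, 8], [9, 10], [11, 12]]
    assert strassen(deepcopy(sample_a), deepcopy(sample_b)) == naive_multiply(sample_a, sample_b)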
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Checks whether there might be something wrong with given input with regard to the model."""
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
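

# A minimal usage sketch for the pipelines defined above (the checkpoint name
# "t5-small" is illustrative; any seq2seq model works):
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization", model="t5-small")
#   print(summarizer("A long article ...", max_length=20))
#
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   print(translator("How are you?"))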
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Heap helper: index of the parent of the node at ``position``."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap helper: index of the left child of the node at ``position``."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap helper: index of the right child of the node at ``position``."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue keyed by integer weights, with an updatable key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with a given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with the lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement)
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement)
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected, weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm; return per-node distances and MST parent pointers."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
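

# A minimal usage sketch (not in the original file): build a small weighted
# graph and read the MST parent pointers produced by ``prims_algo``.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 3)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 10)
    dist, parent = prims_algo(graph)
    print(parent)  # expected: {1: None, 2: 1, 3: 2} -- MST edges (1, 2) and (2, 3)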
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
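

# A minimal sketch of the denoising loop the tests above exercise (``model``
# stands in for any callable returning a residual; names are illustrative):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample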
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
@property
def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**_UpperCAmelCase )
return model
def lowerCamelCase__ (self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
lowercase__ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str=0 ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = torch_device == """cpu"""
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowerCamelCase__ (self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
lowercase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
"""simple docstring"""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # the large checkpoints are trained with a bigger patch budget
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = is_vqa

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert a VQA model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
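# Example invocation (illustrative only; both paths and the script name are
# placeholders, not from the original file):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large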
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
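# A minimal defensive wrapper (illustrative, not part of the original script).
# Yahoo's markup changes often, so soup.find() above can return None and raise
# AttributeError; this hypothetical helper turns that into a sentinel value.
def safe_stock_price(symbol: str) -> str:
    try:
        return stock_price(symbol)
    except AttributeError:
        return "n/a"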
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
def __A ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
__lowerCamelCase = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
__lowerCamelCase = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
__lowerCamelCase , __lowerCamelCase = self.get_input_output_texts(_UpperCAmelCase )
__lowerCamelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def __A ( self : str ) -> Any:
pass # TODO add if relevant
def __A ( self : Tuple ) -> Any:
pass # TODO add if relevant
def __A ( self : Tuple ) -> List[Any]:
pass # TODO add if relevant
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = self.get_tokenizer()
# Testing tokenization
__lowerCamelCase = '''こんにちは、世界。 こんばんは、㔺界。'''
__lowerCamelCase = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
__lowerCamelCase = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
__lowerCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def __A ( self : Optional[int] ) -> Any:
__lowerCamelCase = self.get_tokenizer()
# Testing tokenization
__lowerCamelCase = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
__lowerCamelCase = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
__lowerCamelCase = tokenizer.encode(_UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def __A ( self : Tuple ) -> Optional[int]:
__lowerCamelCase = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__lowerCamelCase = '''こんにちは、世界。'''
__lowerCamelCase = '''こんばんは、㔺界。😀'''
__lowerCamelCase = '''こんにちは、世界。こんばんは、世界。😀'''
__lowerCamelCase = tokenizer.encode(prefix_text + input_text )
__lowerCamelCase = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
__lowerCamelCase = tokenizer.encode(_UpperCAmelCase , prefix_text=_UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(_UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(_UpperCAmelCase )
__lowerCamelCase = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def __A ( self : List[str] ) -> Any:
__lowerCamelCase = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__lowerCamelCase = '''こんにちは、世界。'''
__lowerCamelCase = '''こんばんは、㔺界。😀'''
__lowerCamelCase = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
__lowerCamelCase = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
__lowerCamelCase = [1] + [0] * (len_prefix + len_text + 1)
__lowerCamelCase = [1] * (len_prefix + len_text + 1) + [0]
__lowerCamelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__lowerCamelCase = tokenizer(prefix_text + input_text ).token_type_ids
__lowerCamelCase = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
__lowerCamelCase = tokenizer(_UpperCAmelCase , prefix_text=_UpperCAmelCase ).token_type_ids
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def __A ( self : Tuple ) -> str:
__lowerCamelCase = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__lowerCamelCase = tokenizer.encode('''あンいワ''' )
__lowerCamelCase = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
__lowerCamelCase = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __A ( self : Union[str, Any] ) -> Dict:
__lowerCamelCase = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__lowerCamelCase = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
__lowerCamelCase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase )
__lowerCamelCase = tokenizer.batch_encode_plus(_UpperCAmelCase , padding=_UpperCAmelCase )
# fmt: off
__lowerCamelCase = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
__lowerCamelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__lowerCamelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , _UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , _UpperCAmelCase )
def __A ( self : str ) -> Tuple:
pass
def __A ( self : int ) -> Optional[int]:
pass
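# Note (not part of the original tests): the prefix_text mechanism exercised
# above inserts a SEG token between the prefix and the input, so e.g.
#     tokenizer.encode("いワ", prefix_text="あン")
# decodes to the same string as encoding "あンいワ" directly, while the
# token_type_ids mark prefix positions with 1 and input positions with 0.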
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
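# --- Illustrative usage sketch (not part of the original file) ---
# Shows how the derived properties relate to the config fields; the numbers
# come from the default values in the signature above.
if __name__ == "__main__":
    config = FalconConfig()  # falcon-7b-style defaults
    print(config.head_dim)  # hidden_size // num_attention_heads -> 4544 // 71 == 64
    print(config.rotary)  # True: rotary embeddings are used exactly when alibi is off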
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ ( self , A_ ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : str = '''this is a test'''
__lowerCAmelCase : Union[str, Any] = '''this is a test'''
return input_text, output_text
def UpperCamelCase__ ( self , A_ , A_=False , A_=20 , A_=5 ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : int = self.get_input_output_texts(_UpperCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = '''<pad>'''
__lowerCAmelCase : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_UpperCAmelCase ) , 81 )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase : List[Any] = tokenizer.vocab_size
__lowerCAmelCase : str = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowerCAmelCase : int = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__lowerCAmelCase : Optional[int] = tokenizer.add_tokens(_UpperCAmelCase )
__lowerCAmelCase : str = tokenizer.vocab_size
__lowerCAmelCase : List[Any] = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase ) )
__lowerCAmelCase : List[Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowerCAmelCase : List[str] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__lowerCAmelCase : Optional[int] = tokenizer.add_special_tokens(_UpperCAmelCase )
__lowerCAmelCase : Tuple = tokenizer.vocab_size
__lowerCAmelCase : Any = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase ) )
__lowerCAmelCase : int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
pass
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
pass
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : List[str] = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__lowerCAmelCase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__lowerCAmelCase : Optional[Any] = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_UpperCAmelCase , )
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , **_UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_UpperCAmelCase , return_tensors="""np""" )
lowercase__ = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = processor(text=_UpperCAmelCase )
lowercase__ = tokenizer(_UpperCAmelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # the old attention stores q/k/v as one fused matrix stacked along
                # dim 0, so each slice of size embed_dim is one projection
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
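# Standalone sketch of the fused q/k/v slicing used above (illustrative only;
# the shapes are made up and nothing here depends on ProphetNet):
#
#     import torch
#     in_proj_weight = torch.randn(3 * 8, 8)  # fused q/k/v for embed_dim == 8
#     embed_dim = in_proj_weight.shape[0] // 3
#     q_w = in_proj_weight[:embed_dim, :]
#     k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
#     v_w = in_proj_weight[2 * embed_dim :, :]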
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = """x = 3"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result == 3
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3} )
lowercase__ = """x = y"""
lowercase__ = {"""y""": 5}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 5, """y""": 5} )
def lowerCamelCase__ (self : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = """y = add_two(x)"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = """x = 3"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result == 3
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3} )
def lowerCamelCase__ (self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ = """test_dict = {'x': x, 'y': add_two(x)}"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = """x = 3\ny = 5"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )
def lowerCamelCase__ (self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = """text = f'This is x: {x}.'"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """text""": """This is x: 3."""} )
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 2} )
lowercase__ = {"""x""": 8}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 8, """y""": 5} )
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ = """test_list = [x, add_two(x)]"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [3, 5] )
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_list""": [3, 5]} )
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
lowercase__ = """y = x"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result == 3
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 3} )
def lowerCamelCase__ (self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_list""": [3, 5]} )
lowercase__ = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = """x = 0\nfor i in range(3):\n x = i"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {"""range""": range} , state=_UpperCAmelCase )
assert result == 2
self.assertDictEqual(_UpperCAmelCase , {"""x""": 2, """i""": 2} )
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __snake_case ( UpperCAmelCase__):
def __init__( self : Tuple , __lowerCAmelCase : Distribution , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=0 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 1.0 if scale is None else scale
_lowerCamelCase : int = 0.0 if loc is None else loc
super().__init__(_UpperCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_UpperCAmelCase )] )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.variance.sqrt()
class __snake_case ( nn.Module):
def __init__( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Callable[..., Tuple[torch.Tensor]] , **__lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
_lowerCamelCase : Union[str, Any] = args_dim
_lowerCamelCase : Dict = nn.ModuleList([nn.Linear(_UpperCAmelCase , _UpperCAmelCase ) for dim in args_dim.values()] )
_lowerCamelCase : Optional[Any] = domain_map
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : torch.Tensor ):
"""simple docstring"""
_lowerCamelCase : Any = [proj(_UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*_UpperCAmelCase )
class __snake_case ( nn.Module):
def __init__( self : str , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : str = function
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int , *__lowerCAmelCase : List[str] ):
"""simple docstring"""
return self.function(_UpperCAmelCase , *_UpperCAmelCase )
class __snake_case :
snake_case__ : Dict = 4_2
snake_case__ : List[Any] = 4_2
snake_case__ : Union[str, Any] = 4_2
def __init__( self : str , __lowerCAmelCase : int = 1 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = dim
_lowerCamelCase : Optional[Any] = {k: dim * self.args_dim[k] for k in self.args_dim}
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*_UpperCAmelCase )
else:
return Independent(self.distribution_class(*_UpperCAmelCase ) , 1 )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[torch.Tensor] = None , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self._base_distribution(_UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_UpperCAmelCase , loc=_UpperCAmelCase , scale=_UpperCAmelCase , event_dim=self.event_dim )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.event_shape )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return 0.0
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : int ):
"""simple docstring"""
return ParameterProjection(
in_features=_UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : torch.Tensor ):
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : torch.Tensor ):
"""simple docstring"""
return (x + torch.sqrt(torch.square(_UpperCAmelCase ) + 4.0 )) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overrides the parent method: scale is absorbed into the logits rather
    # than being applied as an affine transform.
    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
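

# A minimal usage sketch of the pieces above, assuming the reconstructed names
# (StudentTOutput, ParameterProjection). The feature size and batch shape are
# illustrative, not values taken from the source.
if __name__ == "__main__":
    demo_output = StudentTOutput(dim=1)
    demo_proj = demo_output.get_parameter_projection(in_features=32)
    demo_hidden = torch.randn(8, 32)  # (batch, features) from some network
    demo_args = demo_proj(demo_hidden)  # (df, loc, scale) after domain_map
    demo_distr = demo_output.distribution(demo_args, loc=torch.zeros(8), scale=torch.ones(8))
    print(demo_distr.sample().shape)  # torch.Size([8])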


class Graph:
    """Undirected weighted graph stored as an adjacency map (for Boruvka's MST)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register a vertex if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected weighted edge between head and tail."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct, as Boruvka's algorithm requires."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Render the graph as one 'head -> tail == weight' line per edge."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return the vertices of the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from a vertex list and (head, tail, weight) edge tuples."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind(object):
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of `graph` with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
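

# A small usage sketch of the reconstructed API above; the sample edges and
# weights are illustrative, not data from the source.
if __name__ == "__main__":
    demo_graph = Graph.build(edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3)])
    demo_graph.distinct_weight()  # Boruvka assumes distinct edge weights
    print(Graph.boruvka_mst(demo_graph))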
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
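

# A brief usage sketch, assuming the FalconConfig name reconstructed above;
# the overridden values are illustrative.
if __name__ == "__main__":
    demo_config = FalconConfig(num_kv_heads=8, new_decoder_architecture=True)
    print(demo_config.head_dim, demo_config.rotary)  # 64 True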
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, else 0 (a logical OR gate)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Calling the feature returns its pyarrow storage type.
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
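

# A standalone sketch of the Csv packaged module outside pytest, assuming a
# local "file.csv" with the header1,header2 layout used in the fixtures above.
if __name__ == "__main__":
    demo_gen = Csv()._generate_tables([["file.csv"]])
    demo_table = pa.concat_tables([table for _, table in demo_gen])
    print(demo_table.to_pydict())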
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy/paste/tweak the old ProphetNet checkpoint weights into the current model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
lowercase__ = ["""key_proj""", """value_proj""", """query_proj"""]
lowercase__ = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
lowercase__ = key.split(""".""" )
if attributes[0] == "lm_head":
lowercase__ = prophet
lowercase__ = prophet_old
else:
lowercase__ = prophet.prophetnet
lowercase__ = prophet_old.model
lowercase__ = False
for attribute in attributes:
if attribute in mapping:
lowercase__ = mapping[attribute]
if not hasattr(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) > 0:
lowercase__ = attribute
elif hasattr(__magic_name__ , __magic_name__ ):
lowercase__ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowercase__ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowercase__ = True
break
elif attribute in special_keys and hasattr(__magic_name__ , """in_proj_weight""" ):
lowercase__ = old_model.in_proj_weight.shape[0] // 3
lowercase__ = getattr(__magic_name__ , __magic_name__ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ = True
break
if attribute.isdigit():
lowercase__ = model[int(__magic_name__ )]
lowercase__ = old_model[int(__magic_name__ )]
else:
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if old_attribute == "":
lowercase__ = old_model
else:
if not hasattr(__magic_name__ , __magic_name__ ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__magic_name__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
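
# Example invocation (paths and script name are illustrative):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted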
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
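

# A rough, self-contained sketch of the lazy-import idea used above
# (a simplified stand-in, not the actual transformers _LazyModule):
# attribute access triggers the real import on first use.
#
#   import importlib
#   import types
#
#   class _LazyDemoModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           module = importlib.import_module(self._attr_to_module[attr])
#           return getattr(module, attr)
#
#   demo = _LazyDemoModule("demo", {"math": ["sqrt"]})
#   print(demo.sqrt(4.0))  # imports math only here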
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras-nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
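

# A brief usage sketch, assuming the CvtConfig name reconstructed above;
# the override is illustrative.
if __name__ == "__main__":
    demo_config = CvtConfig(depth=[1, 4, 16])
    print(demo_config.model_type, demo_config.embed_dim)  # cvt [64, 192, 384]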
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products of primes over all prime partitions of the number."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
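
# The product trick: a multiset of primes is uniquely recoverable from its
# product, so len(partition(n)) equals the number of ways to write n as a sum
# of primes; e.g. partition(7) == {7, 10, 12}, from 7, 2 + 5 and 2 + 2 + 3.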

def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

def check_models_equal(model_1, model_2) -> bool:
    """Return True if the flattened parameter trees of the two models match element-wise."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
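

# A quick standalone sketch of the parameter-comparison helper above: a model
# compared with itself is trivially equal. (Config values are illustrative.)
if __name__ == "__main__":
    demo_config = BertConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
    )
    demo_model = FlaxBertModel(demo_config)
    print(check_models_equal(demo_model, demo_model))  # True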
def longest_distance(graph):
    """Print the length (in vertices) of the longest path in a DAG, via Kahn's topological order."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
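
# For the adjacency list above, the longest chain is 0 -> 2 -> 5 -> 6 -> 7,
# so this prints 5 (path length measured in vertices).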
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
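

# Typical invocation for this test module (path is illustrative):
#   pytest tests/models/flaubert/test_modeling_flaubert.py -k FlaubertModelTest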
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Per-process tensor [rank*n + 1, ..., rank*n + n] on the right device."""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process holds one extra element, so the others must be padded.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # Only defined for two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # Only defined for two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
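
# These checks are meant to run under multiple processes, e.g. (illustrative):
#   accelerate launch --num_processes 2 path/to/this_script.py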
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    '''Return the set of products of primes over all prime partitions of the number.'''
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret

def solution(number_unique_partitions: int = 5000) -> int | None:
    '''Return the smallest integer with more than `number_unique_partitions` prime partitions.'''
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (base 26, A=1) to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("Z")
    26
    >>> excel_title_to_column("AA")
    27
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Predict the next value with ordinary least squares on [1, date, match] features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """One-step forecast from a SARIMAX model with the match count as exogenous input."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Predict the next value with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote whether today's actual value is consistent with the forecasts."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
A : Dict = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
A : str = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
A : Any = Normalizer().fit_transform(data_input_df.values)
# split data
A : Optional[int] = normalize_df[:, 2].tolist()
A : Any = normalize_df[:, 0].tolist()
A : str = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
A : int = normalize_df[:, [1, 2]].tolist()
A : Any = x[: len(x) - 1]
A : Tuple = x[len(x) - 1 :]
# for linear regression & sarimax
A : Optional[int] = total_date[: len(total_date) - 1]
A : Optional[int] = total_user[: len(total_user) - 1]
A : str = total_match[: len(total_match) - 1]
A : Union[str, Any] = total_date[len(total_date) - 1 :]
A : List[str] = total_user[len(total_user) - 1 :]
A : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
A : int = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
A : int = '' if data_safety_checker(res_vote, tst_user) else 'not '
print('Today\'s data is {not_str}safe.')
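
# Note: the three forecasts above are compared against today's observed value;
# the majority vote in data_safety_checker decides whether the new data point
# looks safe.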
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: 1, 1/2, 1/3, ..."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
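
# Example: harmonic_series("5") returns ['1', '1/2', '1/3', '1/4', '1/5'].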
if __name__ == "__main__":
_UpperCamelCase = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """simple docstring"""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """simple docstring"""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """simple docstring"""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''' )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """simple docstring"""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """simple docstring"""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """simple docstring"""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """simple docstring"""
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """simple docstring"""
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
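# Editorial note (hedged): Csv forwards its config kwargs to pandas.read_csv, so
# `converters` above is the standard pandas hook; the lambda turns the
# whitespace-separated cell "1 2 3" into the Python list [1, 2, 3].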
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    if len(string_aa) != 32:
        raise ValueError("""Input must be of length 32""")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("""Input must be non-negative""")
    hex_rep = format(i, """08x""")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, """08b""").encode("""utf-8""")
    start_len = format(len(bit_string), """064b""").encode("""utf-8""")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    if len(bit_string) % 512 != 0:
        raise ValueError("""Input must have length that's a multiple of 512""")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    if i < 0:
        raise ValueError("""Input must be non-negative""")
    i_str = format(i, """032b""")
    new_str = """"""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("""Input must be non-negative""")
    if shift < 0:
        raise ValueError("""Shift must be non-negative""")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67_452_301
    b0 = 0xEF_CDA_B89
    c0 = 0x98_BAD_CFE
    d0 = 0x10_325_476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
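    # Cross-check sketch (editorial addition): if the implementation above is
    # correct MD5, its hex digest must agree with hashlib's reference digest.
    import hashlib
    message = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")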
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
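# Editorial note (hedged): with the _LazyModule pattern above, importing the
# package stays cheap; e.g. `DPTConfig` resolves immediately at type-checking
# time, while torch-backed classes such as DPTModel are only imported when an
# attribute is first accessed at runtime.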
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    '''simple docstring'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            '''\'table\' has to be of square shaped array but got a '''
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('''No LU decomposition exists''')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
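    # Minimal demo (editorial addition; values invented): L @ U reconstructs the
    # input by construction for matrices that admit an LU factorization.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)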
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """simple docstring"""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
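# Examples (illustrative): median_of_two_arrays([1, 3], [2]) -> 2 and
# median_of_two_arrays([1, 2], [3, 4]) -> 2.5.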
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(F'The median of two arrays is: {median_of_two_arrays(array_1, array_2)}')
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( BaseImageProcessor ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = None , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , **_a , ) -> None:
super().__init__(**_UpperCAmelCase )
_a : List[Any] = size if size is not None else {'''shortest_edge''': 2_5_6}
_a : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_a : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Tuple = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' )
_a : Tuple = do_resize
_a : Optional[Any] = size
_a : Optional[Any] = resample
_a : Union[str, Any] = do_center_crop
_a : Optional[Any] = crop_size
_a : str = do_rescale
_a : Tuple = rescale_factor
_a : Dict = do_normalize
_a : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowercase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_a : Dict = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_a : List[Any] = get_resize_output_image_size(_UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
_a : Tuple = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def __lowercase ( self , _a , _a , _a = None , **_a ) -> np.ndarray:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def __lowercase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> int:
_a : str = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Optional[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_a : List[str] = crop_size if crop_size is not None else self.crop_size
_a : Any = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' )
_a : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : str = do_normalize if do_normalize is not None else self.do_normalize
_a : Optional[int] = image_mean if image_mean is not None else self.image_mean
_a : List[str] = image_std if image_std is not None else self.image_std
_a : Dict = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a : int = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_a : Any = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
_a : Dict = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
if do_rescale:
_a : int = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
_a : int = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
_a : str = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_a : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def __lowercase ( self , _a , _a = None ) -> Optional[Any]:
_a : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_UpperCAmelCase ):
_a : List[Any] = target_sizes.numpy()
_a : int = []
for idx in range(len(_UpperCAmelCase ) ):
_a : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_UpperCAmelCase )
_a : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_UpperCAmelCase )
else:
_a : Any = logits.argmax(dim=1 )
_a : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
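# Usage sketch (editorial addition; names are hypothetical since the class name
# above is obfuscated): processors of this shape are normally driven end to end
# through BaseImageProcessor.__call__, which dispatches to the preprocess method:
#   processor = ImageProcessorAbove()
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, crop_height, crop_width)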
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
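# Expected behaviour on the sample graphs above (editorial note): test_graph_1
# decomposes into the components {0, 1, 2}, {3} and {4}; test_graph_2 into
# {0, 1, 2} and {3, 4, 5}. Element order inside each component depends on the
# traversal order, so compare as sets.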
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, F"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), F"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    # The main process contributes one extra element, so padding is exercised
    # on every other rank.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # For now this check only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """sum""")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    # For now this check only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, """mean""")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(F"State: {state}")
    state.print("""testing gather""")
    test_gather(state)
    state.print("""testing gather_object""")
    test_gather_object(state)
    state.print("""testing broadcast""")
    test_broadcast(state)
    state.print("""testing pad_across_processes""")
    test_pad_across_processes(state)
    state.print("""testing reduce_sum""")
    test_reduce_sum(state)
    state.print("""testing reduce_mean""")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
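# Run sketch (editorial addition; exact flags depend on your setup): the tests
# above only exercise multi-process behaviour when launched on several ranks:
#   accelerate launch --num_processes 2 this_script.py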
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = StableDiffusionDiffEditPipeline
A__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A__ = frozenset([] )
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
lowercase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
lowercase__ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_zero=_UpperCAmelCase , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase__ = CLIPTextModel(_UpperCAmelCase )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=0 ) -> Dict:
"""simple docstring"""
lowercase__ = floats_tensor((1, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=0 ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict=0 ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe(**_UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__ = self.pipeline_class.from_pretrained(_UpperCAmelCase )
pipe_loaded.to(_UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=_UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_UpperCAmelCase , _UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe_loaded(**_UpperCAmelCase )[0]
lowercase__ = np.abs(output - output_loaded ).max()
self.assertLess(_UpperCAmelCase , 1E-4 )
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_mask_inputs(_UpperCAmelCase )
lowercase__ = pipe.generate_mask(**_UpperCAmelCase )
lowercase__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase__ = np.array([0] * 9 )
lowercase__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase__ = DPMSolverMultistepScheduler(**_UpperCAmelCase )
lowercase__ = DPMSolverMultistepInverseScheduler(**_UpperCAmelCase )
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowerCamelCase__ (cls : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase__ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase__ = raw_image
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase , num_inference_steps=25 , ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'''{solution() = }''')
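# Sanity check (editorial note): the first six primes are 2, 3, 5, 7, 11, 13,
# so solution(6) should return 13; solution() returns the 10001st prime.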
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """simple docstring"""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("""Matrices are not 2x2""")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list):
    """simple docstring"""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list):
    """simple docstring"""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """simple docstring"""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """simple docstring"""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """simple docstring"""
    print("""\n""".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            f'''Matrix A: {matrix1}\n'''
            f'''Matrix B: {matrix2}'''
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
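# Quick check (editorial note; values invented): multiplying by an identity
# matrix should return the input, e.g.
#   strassen([[1, 2], [3, 4], [5, 6]], [[1, 0], [0, 1]]) == [[1, 2], [3, 4], [5, 6]]
# Note that strassen pads its arguments in place, so pass copies if you need to
# reuse the originals.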
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Any = StableDiffusionDiffEditPipeline
_lowerCAmelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
_lowerCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
_lowerCAmelCase : Optional[int] = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCAmelCase : str = frozenset([])
def _snake_case ( self : List[str] ):
torch.manual_seed(0 )
snake_case_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
snake_case_ : List[Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
snake_case_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCAmelCase , set_alpha_to_zero=_UpperCAmelCase , )
torch.manual_seed(0 )
snake_case_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
snake_case_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
snake_case_ : Any = CLIPTextModel(_UpperCAmelCase )
snake_case_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case_ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple=0 ):
snake_case_ : Any = floats_tensor((1, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case_ : str = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith('''mps''' ):
snake_case_ : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
snake_case_ : Optional[int] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
snake_case_ : List[Any] = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self : List[Any] , lowercase_ : Dict , lowercase_ : Tuple=0 ):
snake_case_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Union[str, Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('''RGB''' )
if str(_UpperCAmelCase ).startswith('''mps''' ):
snake_case_ : Optional[int] = torch.manual_seed(_UpperCAmelCase )
else:
snake_case_ : List[str] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
snake_case_ : Any = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Dict=0 ):
snake_case_ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : str = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('''RGB''' )
if str(_UpperCAmelCase ).startswith('''mps''' ):
snake_case_ : int = torch.manual_seed(_UpperCAmelCase )
else:
snake_case_ : List[str] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
snake_case_ : str = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self : int ):
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
snake_case_ : List[Any] = self.get_dummy_components()
snake_case_ : int = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
snake_case_ : Tuple = self.get_dummy_inputs(_UpperCAmelCase )
snake_case_ : Optional[Any] = pipe(**_UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_UpperCAmelCase )
snake_case_ : Any = self.pipeline_class.from_pretrained(_UpperCAmelCase )
pipe_loaded.to(_UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=_UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_UpperCAmelCase , _UpperCAmelCase ) is None , f"`{optional_component}` did not stay set to None after loading." , )
snake_case_ : Tuple = self.get_dummy_inputs(_UpperCAmelCase )
snake_case_ : List[Any] = pipe_loaded(**_UpperCAmelCase )[0]
snake_case_ : Dict = np.abs(output - output_loaded ).max()
self.assertLess(_UpperCAmelCase , 1E-4 )
def _snake_case ( self : List[str] ):
snake_case_ : Tuple = '''cpu'''
snake_case_ : Tuple = self.get_dummy_components()
snake_case_ : Union[str, Any] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ : Union[str, Any] = self.get_dummy_mask_inputs(_UpperCAmelCase )
snake_case_ : int = pipe.generate_mask(**_UpperCAmelCase )
snake_case_ : Optional[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
snake_case_ : Dict = np.array([0] * 9 )
snake_case_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _snake_case ( self : List[Any] ):
snake_case_ : Dict = '''cpu'''
snake_case_ : Optional[Any] = self.get_dummy_components()
snake_case_ : List[str] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ : Any = self.get_dummy_inversion_inputs(_UpperCAmelCase )
snake_case_ : Union[str, Any] = pipe.invert(**_UpperCAmelCase ).images
snake_case_ : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
snake_case_ : List[str] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
snake_case_ : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
def _snake_case ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def _snake_case ( self : str ):
snake_case_ : List[str] = '''cpu'''
snake_case_ : List[Any] = self.get_dummy_components()
snake_case_ : Dict = {'''beta_start''': 0.0_00_85, '''beta_end''': 0.0_12, '''beta_schedule''': '''scaled_linear'''}
snake_case_ : Any = DPMSolverMultistepScheduler(**_UpperCAmelCase )
snake_case_ : int = DPMSolverMultistepInverseScheduler(**_UpperCAmelCase )
snake_case_ : Union[str, Any] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ : Tuple = self.get_dummy_inversion_inputs(_UpperCAmelCase )
snake_case_ : str = pipe.invert(**_UpperCAmelCase ).images
snake_case_ : str = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
snake_case_ : Union[str, Any] = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
snake_case_ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _snake_case ( cls : str ):
snake_case_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
snake_case_ : List[str] = raw_image.convert('''RGB''' ).resize((768, 768) )
snake_case_ : Optional[Any] = raw_image
def _snake_case ( self : Optional[int] ):
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
snake_case_ : Optional[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
snake_case_ : str = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ : Any = '''a bowl of fruit'''
snake_case_ : Tuple = '''a bowl of pears'''
snake_case_ : str = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
snake_case_ : Union[str, Any] = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase ).latents
snake_case_ : str = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
snake_case_ : List[str] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def _snake_case ( self : int ):
snake_case_ : Dict = torch.manual_seed(0 )
snake_case_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
snake_case_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
snake_case_ : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ : Any = '''a bowl of fruit'''
snake_case_ : int = '''a bowl of pears'''
snake_case_ : str = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
snake_case_ : Optional[Any] = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase , num_inference_steps=25 , ).latents
snake_case_ : List[Any] = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
snake_case_ : Optional[int] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : List[str]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=4 , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def lowerCamelCase__ (self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ (self : Tuple ) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only checks that a pretrained checkpoint loads and runs on trivial input.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify inputs
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
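# A minimal usage sketch (not part of the original module); the checkpoint name
# below is an illustrative assumption, not a value taken from this file:
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(images=image, return_tensors="pt")  # OCR runs inside the image processor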
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
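# Example behaviour of the function above (an illustrative sketch):
# harmonic_series("4") -> ['1', '1/2', '1/3', '1/4']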
if __name__ == "__main__":
A : Tuple = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve an ODE with the modified Euler (Heun) predictor-corrector scheme."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector
        y_pred = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size

    return y
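# Illustrative check (an assumption about usage, not from the original file):
# integrating y' = y from x = 0 with y(0) = 1 approximates e^x, so
#
#     import math
#     y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     # abs(y[-1] - math.e) is well below 1e-3 for this second-order scheme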
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)

            builder_cls = import_main_class(dataset_module.module_path, dataset=True)

            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )

            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
import requests
from bs4 import BeautifulSoup
def UpperCamelCase ( __magic_name__ : str = "AAPL" ) -> str:
"""simple docstring"""
lowercase__ = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
lowercase__ = BeautifulSoup(requests.get(__magic_name__ ).text , """html.parser""" )
lowercase__ = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
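# Illustrative call (scraping depends on Yahoo's current page markup, so treat
# this as a sketch rather than a guaranteed-stable API):
#
#     print(stock_price("AAPL"))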
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
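# A small illustration of the packing behaviour (the tokenizer and data below
# are hypothetical, not from the original script): adjacent short pairs are
# merged until the token budget would be exceeded, so several tiny examples
# can collapse into one packed example.
#
#     # tok = AutoTokenizer.from_pretrained("t5-small")
#     # packed_src, packed_tgt = pack_examples(tok, ["a", "b", "c"], ["x", "y", "z"], max_tokens=1024)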
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
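# Minimal usage sketch (argument values are illustrative, not a recommendation):
#
#     config = FalconConfig(num_kv_heads=8, new_decoder_architecture=True)
#     assert config.head_dim == config.hidden_size // config.num_attention_heads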
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    """PyTorch dataset wrapping cached HANS features."""

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()

        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """TensorFlow dataset wrapping HANS features via a generator."""

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return tensor dims as a list, mixing static ints with dynamic scalars."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
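# Illustration (not from the original module): on an eager tensor every entry
# is a static Python int, so shape_list behaves like list(t.shape).
#
#     t = tf.zeros((2, 3))
#     # shape_list(t) -> [2, 3]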
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny additive nudge keeps masked (all -inf) rows numerically stable under XLA.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
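# Illustration (an assumption about typical use): flatten mirrors
# torch.flatten semantics, collapsing dims start_dim..end_dim into one.
#
#     t = tf.zeros((2, 3, 4))
#     # flatten(t, start_dim=1) has shape (2, 12)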
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Toy tool used by the interpreter tests below."""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
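# A compact sketch of the interpreter contract exercised above: `evaluate`
# returns the value of the last statement and mutates `state` in place.
#
#     state = {}
#     result = evaluate("x = 1\ny = x + 1", {}, state=state)
#     # result == 2 and state == {"x": 1, "y": 2}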
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''', [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
], )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''', [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
], )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''', [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')], )
def test_tar_extract_insecure_files(
    insecure_tar_file,
    error_log,
    tar_file_with_dot_dot,
    tar_file_with_sym_link,
    tmp_path,
    caplog,
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm assumes distinct edge weights; bump duplicates."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given list of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set (union-find) helper for Boruvka's algorithm."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns the minimum spanning tree."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
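# Illustrative usage (not part of the original file): build a small weighted
# graph, make its weights distinct as Boruvka's algorithm requires, and print
# the edges of the resulting minimum spanning tree.
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 1), (1, 2, 1), (2, 3, 1)])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))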
| 305
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 235
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 305
| 0
|
def kth_permutation(k, n):
    """
    Finds k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    The first permutation is always the identity permutation:
    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 305
| 0
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
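# Illustrative standalone usage of the processors exercised above (not part of
# the test file): compose a few warpers and apply them to a batch of logits.
#
#     warper = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
#     scores = warper(input_ids, scores, cur_len=cur_len)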
| 327
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """
    In-graph tokenizer for GPT-2, backed by `keras_nlp.tokenizers.BytePairTokenizer`.
    """

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 305
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue

    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products of primes, each encoding one unique way of writing
    number_to_partition as a sum of primes.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest integer that can be written as the sum of primes in over
    number_unique_partitions unique ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
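# With the default threshold of 5000 unique prime partitions, solution() is
# expected to return 71, the published answer to Project Euler problem 77.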
if __name__ == "__main__":
print(F'{solution() = }')
| 305
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}


class RobertaConfig(PretrainedConfig):
    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 80
|
def longest_distance(graph):
    """Prints the length (in vertices) of the longest path in the DAG."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
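# For the sample graph above this prints 5: the longest chain of vertices is
# 0 -> 2 -> 5 -> 6 -> 7 (distances are counted in vertices, starting at 1).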
| 305
| 0
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 79
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    # Each process gets a distinct slice of 1..num_processes**2, offset by its rank
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process creates a tensor with one extra element so the others must be padded
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs only on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
| 305
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84
|
def excel_title_to_column(column_title: str) -> int:
    """
    Given a string column_title that represents the column title in an
    Excel sheet, return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 305
| 0
|
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 270
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """
    First method: ordinary-least-squares linear regression on (date, match) features.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """
    Second method: SARIMAX with the match count as an exogenous variable.
    """
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """
    Third method: support vector regressor with an RBF kernel.
    """
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """
    Return a low-limit threshold derived from the interquartile range.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """
    Compare the forecast votes against the actual result and decide whether
    today's data looks safe.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data; tst_user[0] is the held-out day's actual value
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 305
| 0
|
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
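# Illustrative checks (not part of the original file):
#   is_balanced("([]{})")  -> True
#   is_balanced("([)]")    -> False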
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 275
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 305
| 0
|
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 335
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
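# A minimal usage sketch of the lazy-import pattern above (assumption: the
# module is consumed through the top-level transformers namespace):
#
#   from transformers import DPTConfig, DPTForDepthEstimation
#   config = DPTConfig()  # resolved on first attribute access via _LazyModule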
| 305
| 0
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __snake_case ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__):
snake_case__ : List[Any] = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 5_0_2_5_7 , __lowerCAmelCase : int = 1_0_2_4 , __lowerCAmelCase : int = 7_6_8 , __lowerCAmelCase : int = 1_2 , __lowerCAmelCase : int = 1_2 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : str = "gelu_new" , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 1E-5 , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , ):
"""simple docstring"""
super().__init__()
_lowerCamelCase : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
_lowerCamelCase : Union[str, Any] = prefix_inner_dim
_lowerCamelCase : Union[str, Any] = prefix_hidden_dim
_lowerCamelCase : Union[str, Any] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCamelCase : str = (
nn.Linear(self.prefix_hidden_dim , _UpperCAmelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_lowerCamelCase : str = GPTaConfig(
vocab_size=_UpperCAmelCase , n_positions=_UpperCAmelCase , n_embd=_UpperCAmelCase , n_layer=_UpperCAmelCase , n_head=_UpperCAmelCase , n_inner=_UpperCAmelCase , activation_function=_UpperCAmelCase , resid_pdrop=_UpperCAmelCase , embd_pdrop=_UpperCAmelCase , attn_pdrop=_UpperCAmelCase , layer_norm_epsilon=_UpperCAmelCase , initializer_range=_UpperCAmelCase , scale_attn_weights=_UpperCAmelCase , use_cache=_UpperCAmelCase , scale_attn_by_inverse_layer_idx=_UpperCAmelCase , reorder_and_upcast_attn=_UpperCAmelCase , )
_lowerCamelCase : Optional[int] = GPTaLMHeadModel(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : torch.Tensor , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[torch.Tensor] = None , ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.transformer.transformer.wte(_UpperCAmelCase )
_lowerCamelCase : Union[str, Any] = self.encode_prefix(_UpperCAmelCase )
_lowerCamelCase : Union[str, Any] = self.decode_prefix(_UpperCAmelCase )
_lowerCamelCase : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
_lowerCamelCase : str = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_lowerCamelCase : int = torch.cat((dummy_token, input_ids) , dim=1 )
_lowerCamelCase : Any = self.transformer(inputs_embeds=_UpperCAmelCase , labels=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : torch.device ):
"""simple docstring"""
return torch.zeros(_UpperCAmelCase , self.prefix_length , dtype=torch.intaa , device=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return self.encode_prefix(_UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : str = torch.split(_UpperCAmelCase , 1 , dim=0 )
_lowerCamelCase : Any = []
_lowerCamelCase : List[str] = []
for feature in features:
_lowerCamelCase : Union[str, Any] = self.decode_prefix(feature.to(_UpperCAmelCase ) ) # back to the clip feature
# Only support beam search for now
_lowerCamelCase , _lowerCamelCase : str = self.generate_beam(
input_embeds=_UpperCAmelCase , device=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_lowerCamelCase : List[Any] = torch.stack(_UpperCAmelCase )
_lowerCamelCase : int = torch.stack(_UpperCAmelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : int = 5 , __lowerCAmelCase : int = 6_7 , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : Optional[int] = None , ):
"""simple docstring"""
_lowerCamelCase : int = eos_token_id
_lowerCamelCase : Dict = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase , dtype=torch.int )
_lowerCamelCase : Union[str, Any] = torch.zeros(_UpperCAmelCase , device=_UpperCAmelCase , dtype=torch.bool )
if input_embeds is not None:
_lowerCamelCase : Any = input_embeds
else:
_lowerCamelCase : List[str] = self.transformer.transformer.wte(_UpperCAmelCase )
for i in range(_UpperCAmelCase ):
_lowerCamelCase : str = self.transformer(inputs_embeds=_UpperCAmelCase )
_lowerCamelCase : Dict = outputs.logits
_lowerCamelCase : int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCamelCase : Union[str, Any] = logits.softmax(-1 ).log()
if scores is None:
_lowerCamelCase , _lowerCamelCase : Any = logits.topk(_UpperCAmelCase , -1 )
_lowerCamelCase : Any = generated.expand(_UpperCAmelCase , *generated.shape[1:] )
_lowerCamelCase , _lowerCamelCase : Tuple = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_lowerCamelCase : Optional[Any] = next_tokens
else:
_lowerCamelCase : Optional[int] = tokens.expand(_UpperCAmelCase , *tokens.shape[1:] )
_lowerCamelCase : Tuple = torch.cat((tokens, next_tokens) , dim=1 )
else:
_lowerCamelCase : Any = -float(np.inf )
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_lowerCamelCase : str = scores_sum / seq_lengths[:, None]
_lowerCamelCase , _lowerCamelCase : Optional[int] = scores_sum_average.view(-1 ).topk(_UpperCAmelCase , -1 )
_lowerCamelCase : List[str] = next_tokens // scores_sum.shape[1]
_lowerCamelCase : Tuple = seq_lengths[next_tokens_source]
_lowerCamelCase : Union[str, Any] = next_tokens % scores_sum.shape[1]
_lowerCamelCase : Any = next_tokens.unsqueeze(1 )
_lowerCamelCase : Tuple = tokens[next_tokens_source]
_lowerCamelCase : Dict = torch.cat((tokens, next_tokens) , dim=1 )
_lowerCamelCase : str = generated[next_tokens_source]
_lowerCamelCase : Optional[int] = scores_sum_average * seq_lengths
_lowerCamelCase : Dict = is_stopped[next_tokens_source]
_lowerCamelCase : Any = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_lowerCamelCase : Union[str, Any] = torch.cat((generated, next_token_embed) , dim=1 )
_lowerCamelCase : Optional[int] = is_stopped + next_tokens.eq(_UpperCAmelCase ).squeeze()
if is_stopped.all():
break
_lowerCamelCase : Union[str, Any] = scores / seq_lengths
_lowerCamelCase : Tuple = scores.argsort(descending=_UpperCAmelCase )
# tokens tensors are already padded to max_seq_length
_lowerCamelCase : List[Any] = [tokens[i] for i in order]
_lowerCamelCase : Any = torch.stack(_UpperCAmelCase , dim=0 )
_lowerCamelCase : Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 72
|
from __future__ import annotations
def UpperCamelCase ( __magic_name__ : list[float] , __magic_name__ : list[float] ) -> float:
"""simple docstring"""
lowercase__ = sorted(numsa + numsa )
lowercase__ , lowercase__ = divmod(len(__magic_name__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
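# Worked example: median_of_two_arrays([1, 3], [2]) merges to [1, 2, 3], so
# divmod(3, 2) == (1, 1) and the median is all_numbers[1] == 2; for an even
# total such as [1, 2] and [3, 4], the result is (3 + 2) / 2 == 2.5.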
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Any = [float(x) for x in input('Enter the elements of first array: ').split()]
A : Union[str, Any] = [float(x) for x in input('Enter the elements of second array: ').split()]
print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
| 305
| 0
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
a__ = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class UpperCAmelCase_ ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , *_a , **_a ) -> Optional[int]:
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
self.check_model_type(_UpperCAmelCase )
def __lowercase ( self , _a=None , _a=None , _a=None , **_a ) -> List[str]:
_a , _a : str = {}, {}
if padding is not None:
_a : List[str] = padding
if truncation is not None:
_a : str = truncation
if top_k is not None:
_a : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _a , _a = None , **_a ) -> str:
if isinstance(_UpperCAmelCase , (Image.Image, str) ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_a : Tuple = {'''image''': image, '''question''': question}
else:
_a : Optional[int] = image
_a : str = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
return results
def __lowercase ( self , _a , _a=False , _a=False ) -> List[str]:
_a : List[Any] = load_image(inputs['''image'''] )
_a : int = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )
_a : Tuple = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
model_inputs.update(_UpperCAmelCase )
return model_inputs
def __lowercase ( self , _a ) -> Dict:
_a : Union[str, Any] = self.model(**_UpperCAmelCase )
return model_outputs
def __lowercase ( self , _a , _a=5 ) -> Optional[int]:
if top_k > self.model.config.num_labels:
_a : Dict = self.model.config.num_labels
if self.framework == "pt":
_a : Dict = model_outputs.logits.sigmoid()[0]
_a , _a : List[str] = probs.topk(_UpperCAmelCase )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
_a : List[Any] = scores.tolist()
_a : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
| 235
|
A : Union[str, Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
A : List[Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def UpperCamelCase ( __magic_name__ : dict[int, list[int]] , __magic_name__ : int , __magic_name__ : list[bool] ) -> list[int]:
"""simple docstring"""
lowercase__ = True
lowercase__ = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
order.append(__magic_name__ )
return order
def UpperCamelCase ( __magic_name__ : dict[int, list[int]] , __magic_name__ : int , __magic_name__ : list[bool] ) -> list[int]:
"""simple docstring"""
lowercase__ = True
lowercase__ = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__magic_name__ , __magic_name__ , __magic_name__ )
return component
def UpperCamelCase ( __magic_name__ : dict[int, list[int]] ) -> list[list[int]]:
"""simple docstring"""
lowercase__ = len(__magic_name__ ) * [False]
lowercase__ = {vert: [] for vert in range(len(__magic_name__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__magic_name__ )
lowercase__ = []
for i, was_visited in enumerate(__magic_name__ ):
if not was_visited:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = []
lowercase__ = len(__magic_name__ ) * [False]
for i in range(len(__magic_name__ ) ):
lowercase__ = order[len(__magic_name__ ) - i - 1]
if not visited[vert]:
lowercase__ = find_components(__magic_name__ , __magic_name__ , __magic_name__ )
components_list.append(__magic_name__ )
return components_list
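# Worked example on the test graphs above: the strongly connected components
# of the first graph are {0, 1, 2}, {3} and {4}; the second graph yields
# {0, 1, 2} and {3, 4, 5} (0->2->1->0 and 3->4->5->3 form the cycles).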
| 305
| 0
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _UpperCamelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = params
__SCREAMING_SNAKE_CASE : Tuple = np.array(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Tuple = np.array([len(_UpperCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : str ):
"""simple docstring"""
return len(self.lengths )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.params.max_model_input_size
__SCREAMING_SNAKE_CASE : str = self.lengths > max_len
logger.info(F"Splitting {sum(_UpperCAmelCase )} too long sequences." )
def divide_chunks(lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ):
return [l[i : i + n] for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase )]
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : Dict = []
if self.params.mlm:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__SCREAMING_SNAKE_CASE : List[Any] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__SCREAMING_SNAKE_CASE : str = np.insert(_UpperCAmelCase , 0 , _UpperCAmelCase )
if sub_s[-1] != sep_id:
__SCREAMING_SNAKE_CASE : List[str] = np.insert(_UpperCAmelCase , len(_UpperCAmelCase ) , _UpperCAmelCase )
assert len(_UpperCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_UpperCAmelCase )
new_tok_ids.extend(_UpperCAmelCase )
new_lengths.extend([len(_UpperCAmelCase ) for l in sub_seqs] )
__SCREAMING_SNAKE_CASE : Any = np.array(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE : List[Any] = np.array(_UpperCAmelCase )
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = len(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.lengths > 1_1
__SCREAMING_SNAKE_CASE : Optional[Any] = self.token_ids[indices]
__SCREAMING_SNAKE_CASE : Optional[int] = self.lengths[indices]
__SCREAMING_SNAKE_CASE : Dict = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
__SCREAMING_SNAKE_CASE : Optional[int] = self.params.special_tok_ids["""unk_token"""]
__SCREAMING_SNAKE_CASE : Dict = len(self )
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__SCREAMING_SNAKE_CASE : Tuple = (unk_occs / self.lengths) < 0.5
__SCREAMING_SNAKE_CASE : List[Any] = self.token_ids[indices]
__SCREAMING_SNAKE_CASE : Optional[int] = self.lengths[indices]
__SCREAMING_SNAKE_CASE : Any = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = [t[0] for t in batch]
__SCREAMING_SNAKE_CASE : List[Any] = [t[1] for t in batch]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
# Max for paddings
__SCREAMING_SNAKE_CASE : Union[str, Any] = max(_UpperCAmelCase )
# Pad token ids
if self.params.mlm:
__SCREAMING_SNAKE_CASE : List[str] = self.params.special_tok_ids["""pad_token"""]
else:
__SCREAMING_SNAKE_CASE : int = self.params.special_tok_ids["""unk_token"""]
__SCREAMING_SNAKE_CASE : str = [list(t.astype(_UpperCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(_UpperCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(_UpperCAmelCase )
assert all(len(_UpperCAmelCase ) == max_seq_len_ for t in tk_ )
__SCREAMING_SNAKE_CASE : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_UpperCAmelCase ) # (bs)
return tk_t, lg_t
| 112
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = StableDiffusionDiffEditPipeline
A__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A__ = frozenset([] )
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
lowercase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
lowercase__ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_UpperCAmelCase , set_alpha_to_zero=_UpperCAmelCase , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase__ = CLIPTextModel(_UpperCAmelCase )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=0 ) -> Dict:
"""simple docstring"""
lowercase__ = floats_tensor((1, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=0 ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict=0 ) -> str:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe(**_UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__ = self.pipeline_class.from_pretrained(_UpperCAmelCase )
pipe_loaded.to(_UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=_UpperCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_UpperCAmelCase , _UpperCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__ = pipe_loaded(**_UpperCAmelCase )[0]
lowercase__ = np.abs(output - output_loaded ).max()
self.assertLess(_UpperCAmelCase , 1E-4 )
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_mask_inputs(_UpperCAmelCase )
lowercase__ = pipe.generate_mask(**_UpperCAmelCase )
lowercase__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase__ = np.array([0] * 9 )
lowercase__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase__ = DPMSolverMultistepScheduler(**_UpperCAmelCase )
lowercase__ = DPMSolverMultistepInverseScheduler(**_UpperCAmelCase )
lowercase__ = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = self.get_dummy_inversion_inputs(_UpperCAmelCase )
lowercase__ = pipe.invert(**_UpperCAmelCase ).images
lowercase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase__ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1E-3 )
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowerCamelCase__ (cls : str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase__ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase__ = raw_image
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = torch.manual_seed(0 )
lowercase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = """a bowl of fruit"""
lowercase__ = """a bowl of pears"""
lowercase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=_UpperCAmelCase , target_prompt=_UpperCAmelCase , generator=_UpperCAmelCase , )
lowercase__ = pipe.invert(
prompt=_UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_UpperCAmelCase , num_inference_steps=25 , ).latents
lowercase__ = pipe(
prompt=_UpperCAmelCase , mask_image=_UpperCAmelCase , image_latents=_UpperCAmelCase , generator=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase__ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
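# DiffEdit flow exercised by the tests above: (1) generate_mask contrasts the
# source and target prompts to localize the edit region, (2) invert maps the
# input image to inverted latents with an inverse scheduler, and (3) the
# pipeline denoises those latents with the target prompt inside the mask.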
| 305
| 0
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_SCREAMING_SNAKE_CASE = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: List[str] = True
__magic_name__: Optional[int] = None
# Automatically constructed
__magic_name__: Any = "PIL.Image.Image"
__magic_name__: int = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__magic_name__: Union[str, Any] = field(default="Image" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self.pa_type
def UpperCAmelCase_ ( self : Any , _A : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
snake_case_ : Optional[Any] = np.array(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_UpperCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_UpperCAmelCase )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" )
def UpperCAmelCase_ ( self : str , _A : dict , _A : Tuple=None ) -> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
snake_case_ : Tuple = {}
snake_case_ ,snake_case_ : int = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of \'path\' or \'bytes\' but both are None in {value}.""" )
else:
if is_local_path(_UpperCAmelCase ):
snake_case_ : str = PIL.Image.open(_UpperCAmelCase )
else:
snake_case_ : Any = path.split('::' )[-1]
try:
snake_case_ : Tuple = string_to_dict(_UpperCAmelCase , config.HUB_DATASETS_URL )['repo_id']
snake_case_ : int = token_per_repo_id.get(_UpperCAmelCase )
except ValueError:
snake_case_ : List[Any] = None
with xopen(_UpperCAmelCase , 'rb' , use_auth_token=_UpperCAmelCase ) as f:
snake_case_ : Any = BytesIO(f.read() )
snake_case_ : Dict = PIL.Image.open(bytes_ )
else:
snake_case_ : List[str] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def UpperCAmelCase_ ( self : int ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def UpperCAmelCase_ ( self : List[str] , _A : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
snake_case_ : Optional[Any] = pa.array([None] * len(_UpperCAmelCase ) , type=pa.binary() )
snake_case_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case_ : Tuple = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() )
snake_case_ : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
snake_case_ : Optional[int] = storage.field('bytes' )
else:
snake_case_ : Optional[int] = pa.array([None] * len(_UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
snake_case_ : Optional[Any] = storage.field('path' )
else:
snake_case_ : Dict = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() )
snake_case_ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
snake_case_ : Union[str, Any] = pa.array(
[encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
snake_case_ : Dict = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() )
snake_case_ : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase , self.pa_type )
def UpperCAmelCase_ ( self : Optional[int] , _A : pa.StructArray ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(_A : Any ):
with xopen(_UpperCAmelCase , 'rb' ) as f:
snake_case_ : Tuple = f.read()
return bytes_
snake_case_ : Optional[int] = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
snake_case_ : List[str] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
snake_case_ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase , self.pa_type )
def SCREAMING_SNAKE_CASE__ ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
snake_case_ : Union[str, Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Tuple = BytesIO()
if image.format in list_image_compression_formats():
snake_case_ : Any = image.format
else:
snake_case_ : Dict = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(__a , format=__a )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__ ( __a ):
if hasattr(__a , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__a )}
def SCREAMING_SNAKE_CASE__ ( __a ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
snake_case_ : Tuple = array.dtype
snake_case_ : Optional[Any] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
snake_case_ : List[str] = dtype.kind
snake_case_ : Optional[Any] = dtype.itemsize
snake_case_ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
snake_case_ : Any = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
snake_case_ : Optional[int] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
snake_case_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(__a )
snake_case_ : Union[str, Any] = np.dtype(__a )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
snake_case_ : Any = PIL.Image.fromarray(array.astype(__a ) )
return {"path": None, "bytes": image_to_bytes(__a )}
def SCREAMING_SNAKE_CASE__ ( __a ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
snake_case_ ,snake_case_ : List[str] = first_non_null_value(__a )
if isinstance(__a , __a ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__a , np.ndarray ):
snake_case_ : Any = no_op_if_value_is_null(__a )
return [obj_to_image_dict_func(__a ) for obj in objs]
elif isinstance(__a , PIL.Image.Image ):
snake_case_ : str = no_op_if_value_is_null(__a )
return [obj_to_image_dict_func(__a ) for obj in objs]
else:
return objs
else:
return objs
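# Summary of the feature above: encoding normalizes strings, bytes, numpy
# arrays and PIL images into {"bytes", "path"} structs; decoding lazily
# reopens them with Pillow, resolving Hub-hosted paths with per-repo tokens.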
| 327
|
from __future__ import annotations
import math
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list ) -> list:
"""simple docstring"""
if len(__magic_name__ ) != 2 or len(a[0] ) != 2 or len(__magic_name__ ) != 2 or len(b[0] ) != 2:
raise Exception("""Matrices are not 2x2""" )
lowercase__ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list ) -> Union[str, Any]:
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__magic_name__ ) )
]
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list ) -> int:
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__magic_name__ ) )
]
def UpperCamelCase ( __magic_name__ : list ) -> tuple[list, list, list, list]:
"""simple docstring"""
if len(__magic_name__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("""Odd matrices are not supported!""" )
lowercase__ = len(__magic_name__ )
lowercase__ = matrix_length // 2
lowercase__ = [[a[i][j] for j in range(__magic_name__ , __magic_name__ )] for i in range(__magic_name__ )]
lowercase__ = [
[a[i][j] for j in range(__magic_name__ , __magic_name__ )] for i in range(__magic_name__ , __magic_name__ )
]
lowercase__ = [[a[i][j] for j in range(__magic_name__ )] for i in range(__magic_name__ )]
lowercase__ = [[a[i][j] for j in range(__magic_name__ )] for i in range(__magic_name__ , __magic_name__ )]
return top_left, top_right, bot_left, bot_right
def UpperCamelCase ( __magic_name__ : list ) -> tuple[int, int]:
"""simple docstring"""
return len(__magic_name__ ), len(matrix[0] )
def UpperCamelCase ( __magic_name__ : list ) -> None:
"""simple docstring"""
print("""\n""".join(str(__magic_name__ ) for line in matrix ) )
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list ) -> list:
"""simple docstring"""
if matrix_dimensions(__magic_name__ ) == (2, 2):
return default_matrix_multiplication(__magic_name__ , __magic_name__ )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = split_matrix(__magic_name__ )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = split_matrix(__magic_name__ )
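# Strassen's seven recursive products over the quadrants
# A = [[a11, a12], [a21, a22]] and B = [[b11, b12], [b21, b22]], in order:
#   t1 = a11 (b12 - b22)          t2 = (a11 + a12) b22
#   t3 = (a21 + a22) b11          t4 = a22 (b21 - b11)
#   t5 = (a11 + a22)(b11 + b22)   t6 = (a12 - a22)(b21 + b22)
#   t7 = (a11 - a21)(b11 + b12)
# recombined as c11 = t5 + t4 - t2 + t6, c12 = t1 + t2,
# c21 = t3 + t4, c22 = t1 + t5 - t3 - t7.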
lowercase__ = actual_strassen(__magic_name__ , matrix_subtraction(__magic_name__ , __magic_name__ ) )
lowercase__ = actual_strassen(matrix_addition(__magic_name__ , __magic_name__ ) , __magic_name__ )
lowercase__ = actual_strassen(matrix_addition(__magic_name__ , __magic_name__ ) , __magic_name__ )
lowercase__ = actual_strassen(__magic_name__ , matrix_subtraction(__magic_name__ , __magic_name__ ) )
lowercase__ = actual_strassen(matrix_addition(__magic_name__ , __magic_name__ ) , matrix_addition(__magic_name__ , __magic_name__ ) )
lowercase__ = actual_strassen(matrix_subtraction(__magic_name__ , __magic_name__ ) , matrix_addition(__magic_name__ , __magic_name__ ) )
lowercase__ = actual_strassen(matrix_subtraction(__magic_name__ , __magic_name__ ) , matrix_addition(__magic_name__ , __magic_name__ ) )
lowercase__ = matrix_addition(matrix_subtraction(matrix_addition(__magic_name__ , __magic_name__ ) , __magic_name__ ) , __magic_name__ )
lowercase__ = matrix_addition(__magic_name__ , __magic_name__ )
lowercase__ = matrix_addition(__magic_name__ , __magic_name__ )
lowercase__ = matrix_subtraction(matrix_subtraction(matrix_addition(__magic_name__ , __magic_name__ ) , __magic_name__ ) , __magic_name__ )
# construct the new matrix from our 4 quadrants
lowercase__ = []
for i in range(len(__magic_name__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__magic_name__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list ) -> list:
"""simple docstring"""
if matrix_dimensions(__magic_name__ )[1] != matrix_dimensions(__magic_name__ )[0]:
lowercase__ = (
"""Unable to multiply these matrices, please check the dimensions.\n"""
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(__magic_name__ )
lowercase__ = matrix_dimensions(__magic_name__ )
lowercase__ = matrix_dimensions(__magic_name__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowercase__ = max(*__magic_name__ , *__magic_name__ )
lowercase__ = int(math.pow(2 , math.ceil(math.loga(__magic_name__ ) ) ) )
lowercase__ = matrixa
lowercase__ = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , __magic_name__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __magic_name__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __magic_name__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowercase__ = actual_strassen(__magic_name__ , __magic_name__ )
# Removing the additional zeros
for i in range(0 , __magic_name__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , __magic_name__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
A : Optional[Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
A : List[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
| 305
| 0
|
"""simple docstring"""
def __lowercase ( _a , _a , _a , _a ):
snake_case_ : Optional[Any] = [False] * len(_a )
snake_case_ : Tuple = []
queue.append(_a )
snake_case_ : List[Any] = True
while queue:
snake_case_ : str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_a )
snake_case_ : List[Any] = True
snake_case_ : int = u
return visited[t]
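# Edmonds-Karp max flow: repeatedly run the BFS above to find a shortest
# augmenting path in the residual graph, push the bottleneck capacity along
# it, and update forward/backward residual capacities until no path remains.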
def __lowercase ( _a , _a , _a ):
snake_case_ : str = [-1] * (len(_a ))
snake_case_ : Union[str, Any] = 0
while bfs(_a , _a , _a , _a ):
snake_case_ : Dict = float('''Inf''' )
snake_case_ : str = sink
while s != source:
# Find the minimum value in select path
snake_case_ : Any = min(_a , graph[parent[s]][s] )
snake_case_ : Any = parent[s]
max_flow += path_flow
snake_case_ : str = sink
while v != source:
snake_case_ : Optional[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
snake_case_ : str = parent[v]
return max_flow
lowercase__ : Optional[Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
lowercase__ : str = 0, 5
print(ford_fulkerson(graph, source, sink))
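# For the capacity matrix above (the classic CLRS flow network with source 0
# and sink 5), the maximum flow printed here is 23.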
| 264
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : List[str]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : List[str]=4 , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def lowerCamelCase__ (self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase__ (self : Tuple ) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
A__ = True
A__ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ = FlaxBertModelTester(self )
@slow
def lowerCamelCase__ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = FlaxBertModel.from_pretrained("""bert-base-cased""" )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 305
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ : int = logging.get_logger(__name__)
class lowercase_ ( UpperCAmelCase__ ):
__UpperCAmelCase = ['pixel_values']
def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
    super().__init__(**kwargs)
    size = size if size is not None else {"shortest_edge": 256}
    size = get_size_dict(size, default_to_square=False)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size)
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
    # Resize so the shortest edge of the image matches size["shortest_edge"],
    # preserving the aspect ratio.
    size = get_size_dict(size, default_to_square=False)
    if "shortest_edge" not in size:
        raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
    output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
    return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

def center_crop(self, image, size, data_format=None, **kwargs):
    # Crop the center of the image to (size["height"], size["width"]).
    size = get_size_dict(size)
    return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

def rescale(self, image, scale, data_format=None, **kwargs):
    # Rescale pixel values by `scale` (typically 1 / 255, mapping them into [0, 1]).
    return rescale(image, scale=scale, data_format=data_format, **kwargs)

def normalize(self, image, mean, std, data_format=None, **kwargs):
    # Normalize the image channel-wise with the given mean and standard deviation.
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size)
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std

    images = make_list_of_images(images)

    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True.")
    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True.")
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

    images = [to_channel_dimension_format(image, data_format) for image in images]

    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)
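# Illustrative usage of the preprocessing pipeline above (a sketch only;
# `MyImageProcessor` stands in for the enclosing class, whose name is not shown
# in this fragment):
#   processor = MyImageProcessor()  # shortest edge 256, center crop 224x224
#   batch = processor.preprocess(images, return_tensors="pt")
#   pixel_values = batch["pixel_values"]  # shape (num_images, 3, 224, 224)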
def harmonic_series(n_term: str) -> list:
    """Generate the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
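# Illustrative output (a sketch, verified against the implementation above):
#   harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']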
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
"""Logical and arithmetic binary shift operations on integers, returned as bit strings."""


def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number left by shift_amount bits, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift a positive number right by shift_amount bits, discarding low bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift a number right by shift_amount bits, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
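# A few illustrative calls (a sketch; values checked by hand):
#   logical_left_shift(0b1101, 2)   -> '0b110100'
#   logical_right_shift(0b1101, 2)  -> '0b11'
#   arithmetic_right_shift(-8, 2)   -> '0b11110'  (the sign bit is replicated)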
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Clean up GPU memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
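# How these tests would typically be run (a sketch; the path assumes the usual
# layout of the diffusers repository, which is not shown here):
#   python -m pytest tests/pipelines/shap_e/test_shap_e_img2img.py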
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _snake_case ( lowercase__ : str ) -> str:
'''simple docstring'''
return "".join(sorted(lowercase__ ) )
def _snake_case ( lowercase__ : str ) -> list[str]:
'''simple docstring'''
return word_by_signature[signature(lowercase__ )]
__UpperCAmelCase = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__UpperCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
__UpperCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__UpperCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
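# Illustrative behaviour (a sketch; assumes "words.txt" contains a standard
# English word list):
#   signature("listen") -> "eilnst"
#   anagram("listen")   -> e.g. ["enlist", "listen", "silent", "tinsel"]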
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance India."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # CSS class of the price <div> on the quote page; this selector is brittle
    # and may break whenever Yahoo changes its markup.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    print(pol_reg.predict(poly_reg.fit_transform([[5.5]])))
    # output should be 132148.43750003
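# Note (a small sketch, not part of the original script): once `poly_reg` has
# been fitted, `transform` is sufficient for new inputs:
#   pol_reg.predict(poly_reg.transform([[7.0]]))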
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
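# A quick sketch of the derived properties, using the defaults above:
#   config = FalconConfig()
#   config.head_dim  # 4544 // 71 == 64
#   config.rotary    # True, since alibi defaults to False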