| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
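A hedged usage sketch for the pipeline above; the checkpoint ID is one the diffusers docs commonly pair with unconditional DDIM sampling, and any unconditional DDPM/DDIM checkpoint should behave the same way:

```python
# pip install diffusers torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint ID
images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images
images[0].save("ddim_sample.png")
```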
| 229 | '''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
'''simple docstring'''
__lowerCAmelCase = 3_84
__lowerCAmelCase = 7
if "tiny" in model_name:
__lowerCAmelCase = 96
__lowerCAmelCase = (2, 2, 6, 2)
__lowerCAmelCase = (3, 6, 12, 24)
elif "small" in model_name:
__lowerCAmelCase = 96
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (3, 6, 12, 24)
elif "base" in model_name:
__lowerCAmelCase = 1_28
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (4, 8, 16, 32)
__lowerCAmelCase = 12
__lowerCAmelCase = 5_12
elif "large" in model_name:
__lowerCAmelCase = 1_92
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (6, 12, 24, 48)
__lowerCAmelCase = 12
__lowerCAmelCase = 7_68
# set label information
__lowerCAmelCase = 1_50
__lowerCAmelCase = """huggingface/label-files"""
__lowerCAmelCase = """ade20k-id2label.json"""
__lowerCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(k ): v for k, v in idalabel.items()}
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = SwinConfig(
embed_dim=snake_case_ , depths=snake_case_ , num_heads=snake_case_ , window_size=snake_case_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
__lowerCAmelCase = UperNetConfig(
backbone_config=snake_case_ , auxiliary_in_channels=snake_case_ , num_labels=snake_case_ , idalabel=snake_case_ , labelaid=snake_case_ , )
return config
def create_rename_keys( config ):
'''simple docstring'''
rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
'''simple docstring'''
__lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:dim, :]
__lowerCAmelCase = in_proj_bias[: dim]
__lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
__lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
__lowerCAmelCase = in_proj_weight[
-dim :, :
]
__lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
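The function above splits the original checkpoint's fused attention projection — a single `(3*dim, dim)` `qkv` matrix plus bias — into the separate query/key/value tensors that HF Swin stores. The slicing in isolation, with illustrative tensor names and an illustrative size:

```python
import torch

dim = 96  # per-block hidden size (illustrative value)
in_proj_weight = torch.randn(3 * dim, dim)  # fused [query; key; value] rows
in_proj_bias = torch.randn(3 * dim)

query_w, key_w, value_w = in_proj_weight[:dim], in_proj_weight[dim : dim * 2], in_proj_weight[-dim:]
query_b, key_b, value_b = in_proj_bias[:dim], in_proj_bias[dim : dim * 2], in_proj_bias[-dim:]

# Concatenating the three slices reproduces the fused matrix.
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)
```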
def correct_unfold_reduction_order(x):
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    '''simple docstring'''
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
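These helpers reorder patch-merging ("unfold") weights because the original implementation concatenates the 2x2 window channels in a different order than HF Swin; each `reverse_*` function undoes the corresponding `correct_*` function (the `[0, 2, 1, 3]` permutation is its own inverse). A quick round-trip check, with arbitrary shapes that are multiples of 4:

```python
import torch

x = torch.randn(8, 16)
assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)

v = torch.randn(16)
assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(v)), v)
```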
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
'''simple docstring'''
__lowerCAmelCase = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
__lowerCAmelCase = model_name_to_url[model_name]
__lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , map_location="""cpu""" , file_name=snake_case_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(snake_case_ , param.shape )
__lowerCAmelCase = get_upernet_config(snake_case_ )
__lowerCAmelCase = UperNetForSemanticSegmentation(snake_case_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__lowerCAmelCase = state_dict.pop(snake_case_ )
if "bn" in key:
__lowerCAmelCase = key.replace("""bn""" , """batch_norm""" )
__lowerCAmelCase = val
# rename keys
__lowerCAmelCase = create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__lowerCAmelCase = reverse_correct_unfold_reduction_order(snake_case_ )
if "norm" in key:
__lowerCAmelCase = reverse_correct_unfold_norm_order(snake_case_ )
model.load_state_dict(snake_case_ )
# verify on image
__lowerCAmelCase = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
__lowerCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("""RGB""" )
__lowerCAmelCase = SegformerImageProcessor()
__lowerCAmelCase = processor(snake_case_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
__lowerCAmelCase = model(snake_case_ )
__lowerCAmelCase = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__lowerCAmelCase = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
__lowerCAmelCase = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
__lowerCAmelCase = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
__lowerCAmelCase = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A : int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 229 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = PandasConfig
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
__A : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(_UpperCAmelCase , (str, list, tuple)):
__A : Optional[Any] = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__A : str = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
__A : int = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__A : Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files}))
return splits
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__A : Union[str, Any] = table_cast(_UpperCAmelCase , self.config.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase)):
with open(_UpperCAmelCase , 'rb') as f:
__A : Tuple = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase))
yield i, self._cast_table(_UpperCAmelCase) | 190 |
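The builder above turns each pickled pandas `DataFrame` into an Arrow table and, when features are configured, casts the table to the requested schema. The core conversion in isolation, using a hypothetical file name:

```python
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
df.to_pickle("train.pkl")  # hypothetical path

table = pa.Table.from_pandas(pd.read_pickle("train.pkl"))
print(table.schema)
```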
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho( num : int , seed : int = 2 , step : int = 1 , attempts : int = 3 , ) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(value : int , step : int , modulus : int ) -> int:
return (pow(value , 2 ) + step) % modulus
for _ in range(attempts ):
# These track the position within the cycle detection logic.
tortoise = seed
hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
tortoise = rand_fn(tortoise , step , num )
hare = rand_fn(hare , step , num )
hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
args = parser.parse_args()
divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""") | 190 | 1 |
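A quick usage sketch for the function above; 8051 = 83 * 97 is the classic textbook input for Pollard's rho, though in general the function can return `None` if every attempt fails:

```python
divisor = pollard_rho(8051)
if divisor is not None:
    print(f"8051 = {divisor} * {8051 // divisor}")  # divisor is 83 or 97
```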
import operator as op
a__ = """scaler.pt"""
a__ = """pytorch_model"""
a__ = """random_states"""
a__ = """optimizer"""
a__ = """scheduler"""
a__ = """pytorch_model.bin"""
a__ = """pytorch_model.bin.index.json"""
a__ = """model.safetensors"""
a__ = """model.safetensors.index.json"""
a__ = """1.10.2"""
a__ = """py38"""
a__ = """4.17.0"""
a__ = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
a__ = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
a__ = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
a__ = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
a__ = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
a__ = """2.0.1"""
a__ = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
a__ = ["""default""", """reduce-overhead""", """max-autotune"""]
a__ = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a__ = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
a__ = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
a__ = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 317 |
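The operator map near the top of this block (dumped as one of the `a__` constants) is the usual building block for string-driven version checks, e.g. accelerate's `compare_versions`. A minimal sketch of that pattern, assuming the `packaging` package is available:

```python
import operator as op

from packaging.version import parse

STR_TO_OP = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(current: str, operation: str, reference: str) -> bool:
    # e.g. compare_versions("2.0.1", ">=", "1.10.2") -> True
    return STR_TO_OP[operation](parse(current), parse(reference))

assert compare_versions("2.0.1", ">=", "1.10.2")
```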
import pprint
import requests
a__ = """https://zenquotes.io/api"""
def quote_of_the_day( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
response = random_quotes()
pprint.pprint(response)
| 317 | 1 |
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
SCREAMING_SNAKE_CASE : Any = get_logger(__name__)
class __lowerCamelCase :
def __init__(self , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = (
os.path.join(__SCREAMING_SNAKE_CASE , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_lowerCAmelCase = Extractor
def A__ (self , lowerCamelCase ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_lowerCAmelCase = os.path.abspath(__SCREAMING_SNAKE_CASE )
return os.path.join(self.extract_dir , hash_url_to_filename(__SCREAMING_SNAKE_CASE ) )
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(__SCREAMING_SNAKE_CASE ) and not (os.path.isdir(__SCREAMING_SNAKE_CASE ) and os.listdir(__SCREAMING_SNAKE_CASE ))
)
def A__ (self , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
_lowerCAmelCase = self.extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
if not extractor_format:
return input_path
_lowerCAmelCase = self._get_output_path(__SCREAMING_SNAKE_CASE )
if self._do_extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return output_path
class __lowerCamelCase ( lowerCAmelCase_ ):
@classmethod
@abstractmethod
def A__ (cls , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
...
class __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
__UpperCamelCase = []
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
return f.read(__SCREAMING_SNAKE_CASE )
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase = b"" ):
'''simple docstring'''
if not magic_number:
_lowerCAmelCase = max(len(__SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers )
try:
_lowerCAmelCase = cls.read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except OSError:
return False
return any(magic_number.startswith(__SCREAMING_SNAKE_CASE ) for cls_magic_number in cls.magic_numbers )
class __lowerCamelCase ( lowerCAmelCase_ ):
@classmethod
def A__ (cls , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
return tarfile.is_tarfile(__SCREAMING_SNAKE_CASE )
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
def resolved(lowerCamelCase ) -> str:
return os.path.realpath(os.path.abspath(__SCREAMING_SNAKE_CASE ) )
def badpath(lowerCamelCase , lowerCamelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ).startswith(__SCREAMING_SNAKE_CASE )
def badlink(lowerCamelCase , lowerCamelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
_lowerCAmelCase = resolved(os.path.join(__SCREAMING_SNAKE_CASE , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = resolved(__SCREAMING_SNAKE_CASE )
for finfo in members:
if badpath(finfo.name , __SCREAMING_SNAKE_CASE ):
logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = tarfile.open(__SCREAMING_SNAKE_CASE )
tar_file.extractall(__SCREAMING_SNAKE_CASE , members=TarExtractor.safemembers(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
tar_file.close()
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x1F\x8B"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with gzip.open(__SCREAMING_SNAKE_CASE , """rb""" ) as gzip_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [
b"""PK\x03\x04""",
b"""PK\x05\x06""", # empty archive
b"""PK\x07\x08""", # spanned archive
]
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase = b"" ):
'''simple docstring'''
if super().is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as fp:
_lowerCAmelCase = _EndRecData(__SCREAMING_SNAKE_CASE )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_lowerCAmelCase = fp.read(__SCREAMING_SNAKE_CASE ) # CD is where we expect it to be
if len(__SCREAMING_SNAKE_CASE ) == sizeCentralDir:
_lowerCAmelCase = struct.unpack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with zipfile.ZipFile(__SCREAMING_SNAKE_CASE , """r""" ) as zip_file:
zip_file.extractall(__SCREAMING_SNAKE_CASE )
zip_file.close()
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with lzma.open(__SCREAMING_SNAKE_CASE ) as compressed_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = rarfile.RarFile(__SCREAMING_SNAKE_CASE )
rf.extractall(__SCREAMING_SNAKE_CASE )
rf.close()
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x28\xb5\x2F\xFD"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
_lowerCAmelCase = zstd.ZstdDecompressor()
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as ifh, open(__SCREAMING_SNAKE_CASE , """wb""" ) as ofh:
dctx.copy_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x42\x5A\x68"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
with bz2.open(__SCREAMING_SNAKE_CASE , """rb""" ) as compressed_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import py7zr
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with py7zr.SevenZipFile(__SCREAMING_SNAKE_CASE , """r""" ) as archive:
archive.extractall(__SCREAMING_SNAKE_CASE )
class __lowerCamelCase ( lowerCAmelCase_ ):
__UpperCamelCase = [b"""\x04\x22\x4D\x18"""]
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lz4.frame
with lz4.frame.open(__SCREAMING_SNAKE_CASE , """rb""" ) as compressed_file:
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as extracted_file:
shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class __lowerCamelCase :
# Check zip last: files of other formats (such as tar or gzip) can be wrongly detected as zip.
__UpperCamelCase = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A__ (cls ):
'''simple docstring'''
return max(
len(__SCREAMING_SNAKE_CASE )
for extractor in cls.extractors.values()
if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(__SCREAMING_SNAKE_CASE , magic_number_length=__SCREAMING_SNAKE_CASE )
except OSError:
return b""
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__SCREAMING_SNAKE_CASE , )
_lowerCAmelCase = cls.infer_extractor_format(__SCREAMING_SNAKE_CASE )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A__ (cls , lowerCamelCase ): # <Added version="2.4.0"/>
'''simple docstring'''
_lowerCAmelCase = cls._get_magic_number_max_length()
_lowerCAmelCase = cls._read_magic_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__SCREAMING_SNAKE_CASE , magic_number=__SCREAMING_SNAKE_CASE ):
return extractor_format
@classmethod
def A__ (cls , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(__SCREAMING_SNAKE_CASE ) , exist_ok=__SCREAMING_SNAKE_CASE )
# Prevent parallel extractions
_lowerCAmelCase = str(Path(__SCREAMING_SNAKE_CASE ).with_suffix(""".lock""" ) )
with FileLock(__SCREAMING_SNAKE_CASE ):
shutil.rmtree(__SCREAMING_SNAKE_CASE , ignore_errors=__SCREAMING_SNAKE_CASE )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__SCREAMING_SNAKE_CASE , )
_lowerCAmelCase = extractor if extractor != """deprecated""" else extractor_format
else:
_lowerCAmelCase = cls.extractors[extractor_format]
return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__SCREAMING_SNAKE_CASE , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__SCREAMING_SNAKE_CASE ):
return extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) | 352 |
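The `Extractor` registry above dispatches on a file's leading magic bytes. A stripped-down sketch of the same detection idea, with the magic numbers copied from the classes above:

```python
from typing import Optional

MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "xz": [b"\xfd\x37\x7a\x58\x5a\x00"],
    "bz2": [b"\x42\x5a\x68"],
    "zstd": [b"\x28\xb5\x2f\xfd"],
}

def infer_format(path: str) -> Optional[str]:
    """Return the archive format whose magic bytes prefix the file, if any."""
    with open(path, "rb") as f:
        head = f.read(8)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(head.startswith(m) for m in magics):
            return fmt
    return None
```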
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(F'{solution() = }') | 317 | 0 |
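For reference, the identity the loop above exploits (Project Euler 135): writing the three progression terms as $x = a + d$, $y = a$, $z = a - d$ with $a > d > 0$,

$$n = x^2 - y^2 - z^2 = (a+d)^2 - a^2 - (a-d)^2 = 4ad - a^2 = a(4d - a),$$

so $a$ divides $n$ (hence the inner loop steps through multiples of `first_term`), $d = (a + n/a)/4$ must be an integer (hence the `% 4` check), $z > 0$ forces $a > d$, and $n > 0$ forces $a < 4d$.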
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = "ernie_m"
SCREAMING_SNAKE_CASE : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : List[str] , _UpperCamelCase : int = 2_5_0_0_0_2 , _UpperCamelCase : int = 7_6_8 , _UpperCamelCase : int = 1_2 , _UpperCamelCase : int = 1_2 , _UpperCamelCase : int = 3_0_7_2 , _UpperCamelCase : str = "gelu" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : int = 5_1_4 , _UpperCamelCase : float = 0.02 , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1e-05 , _UpperCamelCase : List[str]=None , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Optional[int]=0.0 , **_UpperCamelCase : Optional[Any] , ) ->Union[str, Any]:
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = classifier_dropout
snake_case_ = is_decoder
snake_case_ = act_dropout | 8 |
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal') | 8 | 1 |
def solution(n: int = 1_0_0_0) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 252 |
def bead_sort(sequence: list) -> list:
    '''simple docstring'''
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 252 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCAmelCase_ ( ) ->Tuple:
lowerCamelCase__ : Dict =_ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCamelCase__ : int =get_sagemaker_input()
else:
lowerCamelCase__ : List[str] =get_cluster_input()
return config
def lowerCAmelCase_ ( snake_case_ : List[Any]=None ) ->List[str]:
if subparsers is not None:
lowerCamelCase__ : Union[str, Any] =subparsers.add_parser('config' , description=snake_case_ )
else:
lowerCamelCase__ : Tuple =argparse.ArgumentParser('Accelerate config command' , description=snake_case_ )
parser.add_argument(
'--config_file' , default=snake_case_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case_ )
return parser
def lowerCAmelCase_ ( snake_case_ : str ) ->List[Any]:
lowerCamelCase__ : Optional[int] =get_user_input()
if args.config_file is not None:
lowerCamelCase__ : Dict =args.config_file
else:
if not os.path.isdir(snake_case_ ):
os.makedirs(snake_case_ )
lowerCamelCase__ : Optional[Any] =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(snake_case_ )
else:
config.to_yaml_file(snake_case_ )
print(f"""accelerate configuration saved at {config_file}""" )
def lowerCAmelCase_ ( ) ->Optional[Any]:
lowerCamelCase__ : Tuple =config_command_parser()
lowerCamelCase__ : Tuple =parser.parse_args()
config_command(snake_case_ )
if __name__ == "__main__":
main() | 126 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ ( A__ ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase_ :VQModel , lowerCamelCase_ :UNetaDModel , lowerCamelCase_ :DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self :Any , lowerCamelCase_ :int = 1 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :int = 50 , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , **lowerCamelCase_ :Optional[int] , ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowerCamelCase_ , )
lowerCamelCase__ : List[Any] =latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__ : int =latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCamelCase_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCamelCase__ : int ='eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase__ : Optional[int] ={}
if accepts_eta:
lowerCamelCase__ : Dict =eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCamelCase__ : Union[str, Any] =self.scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# predict the noise residual
lowerCamelCase__ : List[str] =self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ : Optional[Any] =self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
# decode the image latents with the VAE
lowerCamelCase__ : Tuple =self.vqvae.decode(lowerCamelCase_ ).sample
lowerCamelCase__ : Optional[Any] =(image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase__ : List[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase__ : int =self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ ) | 126 | 1 |
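The `accepts_eta` check in the pipeline above uses `inspect.signature` so that `eta` is only forwarded to schedulers (such as DDIM) whose `step()` actually accepts it. The pattern in isolation:

```python
import inspect

def step_kwargs_for(scheduler, eta: float) -> dict:
    # Forward `eta` only if the scheduler's step() takes such a parameter.
    params = set(inspect.signature(scheduler.step).parameters)
    return {"eta": eta} if "eta" in params else {}
```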
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A_ , A_ )
else ValueError('''Input Error: Molar mass values must greater than 0.''' )
)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(A_ , A_ , A_ )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
| 74 |
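As a numerical anchor for Graham's law (rate_1 / rate_2 = sqrt(M_2 / M_1)): hydrogen (M ~ 2.016 g/mol) effuses roughly four times faster than oxygen (M ~ 32.00 g/mol):

```python
from math import sqrt

print(round(sqrt(32.00 / 2.016), 6))  # rate_H2 / rate_O2, roughly 3.984
```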
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__UpperCamelCase : int = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__UpperCamelCase : Dict = F'''https://www.google.com/search?q={query}&num=100'''
__UpperCamelCase : Tuple = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
__UpperCamelCase : Tuple = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
__UpperCamelCase : Optional[Any] = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 74 | 1 |
'''simple docstring'''
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """simple docstring"""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """simple docstring"""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """simple docstring"""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """simple docstring"""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """simple docstring"""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
| 254 |
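The small cases quoted in Project Euler 47's statement make a handy sanity check for `run` above:

```python
assert run(2) == [14, 15]          # 14 = 2 * 7 and 15 = 3 * 5
assert run(3) == [644, 645, 646]   # each has three distinct prime factors
```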
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : int = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCAmelCase : Tuple = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCAmelCase : int = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
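# Illustrative sketch (not part of the original metric file): `_compute` is a
# thin wrapper around NLTK's corpus-level GLEU, so the same number can be
# obtained directly; the variable names reuse the doctest examples above.
#
#   from nltk.translate import gleu_score
#   score = gleu_score.corpus_gleu(
#       list_of_references=[[ref1a], [ref2a]],  # one list of reference token lists per hypothesis
#       hypotheses=[hyp1, hyp2],                # pre-tokenized hypotheses
#       min_len=1,
#       max_len=4,                              # smallest/largest n-gram order counted
#   )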
| 300 | 0 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.speecht5''')
MAPPING_SPEECH_ENCODER_PRENET = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
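# Illustrative sketch (not part of the original script): how the three ignore
# patterns are matched; the first two key strings are taken from the ignore
# lists above, the weight names are hypothetical.
#
#   should_ignore("text_decoder_prenet.embed_positions._float_tensor",
#                 ["text_decoder_prenet.*"])                    # True: trailing ".*" prefix match
#   should_ignore("encoder.layers.3.norm_k.weight",
#                 ["encoder.layers.*.norm_k.weight"])           # True: ".*." prefix-and-suffix match
#   should_ignore("encoder.proj.weight", ["encoder.proj"])      # True: plain substring match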
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}' )
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}' )
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
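# Illustrative invocation (not part of the original script; the file paths
# below are hypothetical placeholders for a locally downloaded fairseq
# checkpoint and its SentencePiece model):
#
#   python convert_speecht5_checkpoint.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf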
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 196 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
    def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[2_56, 5_12, 10_24, 20_48], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F'Padding strategy {global_padding} not supported' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
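# Illustrative sketch (not part of the original file): constructing the config
# and inspecting the values derived in `__init__`.
#
#   config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
#   print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   print(config.out_features)  # ['stage2', 'stage4'] (validated against stage_names)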
'''Project Euler problem 9: find the Pythagorean triplet (a, b, c) with a + b + c = 1000 and return the product a * b * c.'''
def solution():
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
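# Sanity check (not part of the original file): the unique triplet is
# (a, b, c) = (200, 375, 425), since 200**2 + 375**2 == 425**2 and the sides
# sum to 1000, so solution() returns 200 * 375 * 425 == 31875000.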
| 55 |
"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number | (1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number & ~(1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number ^ (1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def _A ( lowercase , lowercase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 81 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0) ).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0] )
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 359 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    '''Resolve the direct video source behind an Instagram/IGTV link and return its bytes.'''
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 79 | 0 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts." )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
| 261 | '''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = """https://pypi.org/pypi/diffusers/json"""
    releases = json.loads(request.urlopen(url).read())["""releases"""].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE is already on the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / """__init__.py"""
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / """__init__.py"""
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, """r""", encoding="""utf-8""") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"""^\s*import\s+\.(\S+)\s*$""", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
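# Illustrative note (not part of the original file): for a module body such as
#
#   from .pipeline_utils import DiffusionPipeline
#
# the second regex captures "pipeline_utils", so get_relative_imports() would
# return ["pipeline_utils"]; absolute `import torch`-style lines are not matched.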
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"""{f}.py""" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, """r""", encoding="""utf-8""") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"""^\s*import\s+(\S+)\s*$""", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"""^\s*from\s+(\S+)\s+import""", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(""".""")[0] for imp in imports if not imp.startswith(""".""")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            """This modeling file requires the following packages that were not found in your environment: """
            f"""{", ".join(missing_packages)}. Run `pip install {" ".join(missing_packages)}`""" )
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, """.""")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(""".""")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""" )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = """local"""
    elif pretrained_model_name_or_path.count("""/""") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = """v""" + """.""".join(__version__.split(""".""")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else """main"""
            logger.info(f"""Defaulting to latest_version: {revision}.""")
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            pass  # "main" is always a valid branch
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {", ".join(available_versions + ["main"] )}.""" )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = """git"""
            module_file = pretrained_model_name_or_path + """.py"""
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join("""local""", """--""".join(pretrained_model_name_or_path.split("""/""")))
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"""{module_needed}.py""", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(""".py""", """"""))
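# Illustrative sketch (not part of the original file): loading a community
# pipeline class from the diffusers GitHub examples folder. The pipeline name
# is a real community example, but treat the exact call as a sketch.
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion",
#       module_file="clip_guided_stable_diffusion.py",
#       class_name=None,  # None triggers find_pipeline_class() on the loaded module
#   )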
| 229 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 3_84
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 5_12
    elif "large" in model_name:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 7_68
    # set label information
    num_labels = 1_50
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["""stage1""", """stage2""", """stage3""", """stage4"""], )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    # mmsegmentation stores the patch-merging reduction weight with the 4 patch
    # positions unfolded in a different channel order; reorder them here.
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters: the original checkpoint unfolds them in a different order
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 367 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)))
if __name__ == "__main__":
unittest.main()
| 313 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 12 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 271 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
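# Lazy import structure: submodules for optional backends are only registered
# below when their dependencies (tokenizers, torch, tf, flax) are installed.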
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 258 | from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
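# Load the classic 3-class iris dataset and hold out a random test split.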
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 258 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    # generated images, as PIL images or as a numpy array
    images: Union[List[PIL.Image.Image], np.ndarray]
    # per-image flags, True where the safety checker detected NSFW content
    nsfw_content_detected: Optional[List[bool]]
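# The pipeline itself is only imported when both transformers and torch are installed.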
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 59 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_labels=3, num_choices=4, scope=None, range_bbox=1_000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("Onnx compliancy broke with TF 2.10" )
    def test_onnx_compliancy(self):
pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
lowercase : str = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231
lowercase : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
lowercase : Tuple = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
lowercase : Optional[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
lowercase : List[Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
@slow
def __magic_name__ ( self ):
lowercase : Dict = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
lowercase , lowercase , lowercase , lowercase , lowercase : Union[str, Any] = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : Optional[int] = model(input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a )
# test the sequence output on [0, :3, :3]
lowercase : Any = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _a , atol=1E-3 ) )
# test the pooled output on [1, :3]
lowercase : Optional[Any] = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _a , atol=1E-3 ) )
@slow
def __magic_name__ ( self ):
# initialize model with randomly initialized sequence classification head
lowercase : List[Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 )
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : Optional[Any] = model(
input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
lowercase : Union[str, Any] = outputs.loss
lowercase : Union[str, Any] = (2,)
self.assertEqual(loss.shape , _a )
# test the shape of the logits
lowercase : List[str] = outputs.logits
lowercase : Optional[Any] = (2, 2)
self.assertEqual(logits.shape , _a )
@slow
def __magic_name__ ( self ):
# initialize model with randomly initialized token classification head
lowercase : Any = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 )
lowercase , lowercase , lowercase , lowercase , lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : List[Any] = model(
input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=_a )
# test the shape of the logits
lowercase : int = outputs.logits
lowercase : Optional[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _a )
@slow
def __magic_name__ ( self ):
# initialize model with randomly initialized token classification head
lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
lowercase , lowercase , lowercase , lowercase , lowercase : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
lowercase : Optional[int] = model(input_ids=_a , bbox=_a , attention_mask=_a , token_type_ids=_a )
# test the shape of the logits
lowercase : Any = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _a )
self.assertEqual(outputs.end_logits.shape , _a )
| 202 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
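# The tests below synthesize a short utterance and compare the first output
# samples against reference values recorded with a fixed seed.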
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()
    def test_exact_match_arg(self):
        # with a fixed seed the synthesized waveform is deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 356 | import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size)  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 143 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
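# Each test below checks that the framework-agnostic tensor helpers
# (transpose/reshape/squeeze/expand_dims) match the pure numpy result
# for every installed backend.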
class GenericTester(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
UpperCamelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : int = np.random.randn(3, 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_ ), x.transpose() ) )
UpperCamelCase__ : int = np.random.randn(3, 4, 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_, axes=(1, 2, 0) ), x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Any = np.random.randn(3, 4 )
UpperCamelCase__ : Any = torch.tensor(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_ ), transpose(SCREAMING_SNAKE_CASE_ ).numpy() ) )
UpperCamelCase__ : Optional[int] = np.random.randn(3, 4, 5 )
UpperCamelCase__ : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_, axes=(1, 2, 0) ), transpose(SCREAMING_SNAKE_CASE_, axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = np.random.randn(3, 4 )
UpperCamelCase__ : List[str] = tf.constant(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_ ), transpose(SCREAMING_SNAKE_CASE_ ).numpy() ) )
UpperCamelCase__ : Any = np.random.randn(3, 4, 5 )
UpperCamelCase__ : Optional[Any] = tf.constant(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_, axes=(1, 2, 0) ), transpose(SCREAMING_SNAKE_CASE_, axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Tuple = np.random.randn(3, 4 )
UpperCamelCase__ : Optional[int] = jnp.array(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_ ), np.asarray(transpose(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase__ : Tuple = np.random.randn(3, 4, 5 )
UpperCamelCase__ : List[str] = jnp.array(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE_, axes=(1, 2, 0) ), np.asarray(transpose(SCREAMING_SNAKE_CASE_, axes=(1, 2, 0) ) ) ) )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = np.random.randn(3, 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (4, 3) ), np.reshape(SCREAMING_SNAKE_CASE_, (4, 3) ) ) )
UpperCamelCase__ : Any = np.random.randn(3, 4, 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (12, 5) ), np.reshape(SCREAMING_SNAKE_CASE_, (12, 5) ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = np.random.randn(3, 4 )
UpperCamelCase__ : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (4, 3) ), reshape(SCREAMING_SNAKE_CASE_, (4, 3) ).numpy() ) )
UpperCamelCase__ : Dict = np.random.randn(3, 4, 5 )
UpperCamelCase__ : List[str] = torch.tensor(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (12, 5) ), reshape(SCREAMING_SNAKE_CASE_, (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Dict = np.random.randn(3, 4 )
UpperCamelCase__ : Any = tf.constant(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (4, 3) ), reshape(SCREAMING_SNAKE_CASE_, (4, 3) ).numpy() ) )
UpperCamelCase__ : Tuple = np.random.randn(3, 4, 5 )
UpperCamelCase__ : List[Any] = tf.constant(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (12, 5) ), reshape(SCREAMING_SNAKE_CASE_, (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = np.random.randn(3, 4 )
UpperCamelCase__ : List[str] = jnp.array(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (4, 3) ), np.asarray(reshape(SCREAMING_SNAKE_CASE_, (4, 3) ) ) ) )
UpperCamelCase__ : int = np.random.randn(3, 4, 5 )
UpperCamelCase__ : Any = jnp.array(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE_, (12, 5) ), np.asarray(reshape(SCREAMING_SNAKE_CASE_, (12, 5) ) ) ) )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = np.random.randn(1, 3, 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_ ), np.squeeze(SCREAMING_SNAKE_CASE_ ) ) )
UpperCamelCase__ : Any = np.random.randn(1, 4, 1, 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_, axis=2 ), np.squeeze(SCREAMING_SNAKE_CASE_, axis=2 ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[Any] = np.random.randn(1, 3, 4 )
UpperCamelCase__ : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_ ), squeeze(SCREAMING_SNAKE_CASE_ ).numpy() ) )
UpperCamelCase__ : Optional[Any] = np.random.randn(1, 4, 1, 5 )
UpperCamelCase__ : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_, axis=2 ), squeeze(SCREAMING_SNAKE_CASE_, axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = np.random.randn(1, 3, 4 )
UpperCamelCase__ : List[str] = tf.constant(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_ ), squeeze(SCREAMING_SNAKE_CASE_ ).numpy() ) )
UpperCamelCase__ : Union[str, Any] = np.random.randn(1, 4, 1, 5 )
UpperCamelCase__ : List[Any] = tf.constant(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_, axis=2 ), squeeze(SCREAMING_SNAKE_CASE_, axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[Any] = np.random.randn(1, 3, 4 )
UpperCamelCase__ : Any = jnp.array(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_ ), np.asarray(squeeze(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase__ : int = np.random.randn(1, 4, 1, 5 )
UpperCamelCase__ : Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE_, axis=2 ), np.asarray(squeeze(SCREAMING_SNAKE_CASE_, axis=2 ) ) ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Tuple = np.random.randn(3, 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ), np.expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = np.random.randn(3, 4 )
UpperCamelCase__ : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ), expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Tuple = np.random.randn(3, 4 )
UpperCamelCase__ : Dict = tf.constant(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ), expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = np.random.randn(3, 4 )
UpperCamelCase__ : Dict = jnp.array(SCREAMING_SNAKE_CASE_ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ), np.asarray(expand_dims(SCREAMING_SNAKE_CASE_, axis=1 ) ) ) )
| 201 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
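# Path of a tiny SentencePiece model shipped with the test fixtures.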
lowercase__ : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizer
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Any = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : Any = '<pad>'
lowerCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_0_0_2 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : int = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase_ : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            # SAMPLE_VOCAB is the sentencepiece fixture path defined at the top of this test module
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCAmelCase_ : List[str] = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase_, model_name='xlm-roberta-base', revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3')
| 224 | 0 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """
    Returns the maximum sum of any k consecutive elements, using a sliding window.

    >>> max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
    24
    >>> max_sum_in_array([], 0)
    0
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window one position: drop array[i], pick up array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
_A = [randint(-10_00, 10_00) for i in range(1_00)]
_A = randint(0, 1_10)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 361 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Returns the inverse of a 2x2 or 3x3 matrix, using Decimal for precision."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 137 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDistilBertModel,
            'fill-mask': TFDistilBertForMaskedLM,
            'question-answering': TFDistilBertForQuestionAnswering,
            'text-classification': TFDistilBertForSequenceClassification,
            'token-classification': TFDistilBertForTokenClassification,
            'zero-shot': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 190 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]) | 190 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: int = 0.9, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: int = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
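

# Minimal usage sketch (an illustrative addition, assuming the processor class
# above and an installed Pillow): preprocess one random RGB image and check the
# resulting batch shape.
if __name__ == "__main__":
    from PIL import Image

    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
    processor = PoolFormerImageProcessor()
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # e.g. (1, 3, 224, 224) with the defaults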
| 366 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
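

# A short, illustrative sketch of how the dynamic axes above are consumed: the
# ONNX exporter queries the config's ``inputs`` mapping per input name.
if __name__ == "__main__":
    onnx_config = RobertaOnnxConfig(RobertaConfig())
    print(onnx_config.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])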
| 276 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
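

# Illustrative sketch (an added demo, not part of the checker): cosine_distance
# returns an [n_images, n_concepts] matrix of cosine similarities, which the
# checker thresholds per concept against the learned weights.
if __name__ == "__main__":
    image_embeds = torch.randn(2, 8)
    concept_embeds = torch.randn(5, 8)
    print(cosine_distance(image_embeds, concept_embeds).shape)  # torch.Size([2, 5])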
| 46 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    """
    Joins a list of strings using a separator.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
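    # A couple of quick, illustrative demonstrations of the function above:
    print(join("-", ["2024", "01", "01"]))  # 2024-01-01
    print(join(", ", ["apples", "bananas"]))  # apples, bananas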
| 326 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
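

# Illustrative sketch (hypothetical pipeline): these sets are meant to be
# compared against the parameters a pipeline's __call__ actually accepts, so a
# test can report which expected parameters are missing.
if __name__ == "__main__":
    observed_call_params = frozenset(["prompt", "negative_prompt", "guidance_scale"])
    print(sorted(TEXT_TO_IMAGE_PARAMS - observed_call_params))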
| 326 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase : int = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase : List[Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase : Tuple = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence'''),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''') , id='''sequence''') , id='''references'''),
}) , )
    def _compute(self, references: List[List[List[str]]], predictions: List[List[str]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
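

# A minimal, self-contained sketch (an illustrative addition) of the
# sentence-level GLEU idea described above: take all n-grams for n = 1..4 and
# return min(n-gram precision, n-gram recall).
if __name__ == "__main__":
    from collections import Counter

    def ngrams(tokens, n):
        return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

    def sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
        hyp = sum((ngrams(hypothesis, n) for n in range(min_len, max_len + 1)), Counter())
        ref = sum((ngrams(reference, n) for n in range(min_len, max_len + 1)), Counter())
        matches = sum((hyp & ref).values())
        return min(matches / sum(hyp.values()), matches / sum(ref.values()))

    print(sentence_gleu_sketch("the cat sat".split(), "the cat sat down".split()))  # 0.6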
| 368 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree and iterates through each branch using DFS;
    each element is either excluded or included, and recursion terminates at
    the end of the given sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
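    # Each element is either included or excluded, so a sequence of length n
    # yields 2**n printed subsequences (2**3 = 8 for ["A", "B", "C"] above).
    print(f"Expected number of subsequences for n=3: {2 ** 3}")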
| 231 | 0 |
"""simple docstring"""
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)} | 74 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path='facebook/mbart-large-en-ro', finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'

    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path) | 74 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    # tokenizer has no padding token
    def test_padding_different_model(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 356 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
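    # Illustrative usage for summarization (not in the original file); paths are placeholders:
    # python run_eval.py sshleifer/distilbart-cnn-12-6 cnn_dm/test.source preds.txt \
    #     --reference_path cnn_dm/test.target --score_path rouge.json --task summarization --bs 8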
| 301 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        text_inputs = pipe.tokenizer(
            prompt,
            padding='max_length',
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors='np',
        )
        text_inputs = text_inputs['input_ids']

        inputs['prompt_embeds'] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding='max_length',
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors='np',
            )
            text_inputs = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx'
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx'
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'Andromeda galaxy in a bottle'

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
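

# Illustrative sketch (not part of the original tests): checking which ONNX Runtime
# execution providers are actually available before requesting "CUDAExecutionProvider".
#
#   import onnxruntime as ort
#   print(ort.get_available_providers())
#   # e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider'] on a GPU machine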
| 196 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''

# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
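

# Illustrative sketch (not part of the original file): shape flow through a single
# RegNetConvLayer; values are placeholders.
#
#   layer = RegNetConvLayer(in_channels=3, out_channels=32, kernel_size=3, stride=2)
#   out = layer(torch.randn(1, 3, 224, 224))
#   print(out.shape)  # torch.Size([1, 32, 112, 112]) -- stride 2 halves H and W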
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
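

# Illustrative sketch (not part of the original file): the SE attention tensor has
# shape (b, c, 1, 1) and is broadcast over the spatial dimensions when multiplied.
#
#   se = RegNetSELayer(in_channels=64, reduced_channels=16)
#   x = torch.randn(2, 64, 7, 7)
#   print(se(x).shape)  # torch.Size([2, 64, 7, 7]) -- same shape, channel-reweighted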
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
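

# Illustrative note (not in the original file): with output_hidden_states=True the
# encoder collects the tensor entering each stage plus the final output, so for a
# config with N stages `hidden_states` is a tuple of N + 1 tensors.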
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.',
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
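

# Illustrative sketch (not part of the original file): running the classification
# model end to end; the model id is a real checkpoint, `image` is a placeholder PIL image.
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])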
| 196 | 1 |
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
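

# Illustrative worked example (not in the original file) of the sequence-length
# formula used in create_and_check_model above, with the tester defaults:
#   image_size=32, patch_size=2   ->  (32 // 2) ** 2 = 256 patches
#   len(depths)=3, two merge steps -> 256 // 4 ** 2 = 16 final tokens
#   embed_dim=16                  ->  final dim 16 * 2 ** 2 = 64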
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 109 |
from __future__ import annotations
def CeilIndex(v, l, r, key):  # noqa: E741
    while r - l > 1:
        middle = (l + r) // 2
        if v[middle] >= key:
            r = middle
        else:
            l = middle  # noqa: E741
    return r


def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
    """
    >>> LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> LongestIncreasingSubsequenceLength([])
    0
    >>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1])
    1
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] will replace the ceiling element in tail to keep tail values small
            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]

    return length
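

# Illustrative trace (not in the original file) of the tail array for
# v = [2, 5, 3, 7, 11, 8]:
#   after 2  -> tail = [2]            length 1
#   after 5  -> tail = [2, 5]         length 2
#   after 3  -> tail = [2, 3]         (3 replaces 5 via CeilIndex)
#   after 7  -> tail = [2, 3, 7]      length 3
#   after 11 -> tail = [2, 3, 7, 11]  length 4
#   after 8  -> tail = [2, 3, 7, 8]   (8 replaces 11)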
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Constructs a ConvNeXT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , crop_pct : float , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}" )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 91 |
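A numeric illustration of the `crop_pct` branch in `resize` above (values chosen to mirror the 224/256 default):

```python
shortest_edge = 224
crop_pct = 224 / 256                            # the default set in __init__
resize_edge = int(shortest_edge / crop_pct)     # -> 256
# the short side is first resized to 256, then a (224, 224) center crop is taken;
# at shortest_edge >= 384 the image is instead warped straight to (384, 384).
```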
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_jukebox'] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 79 | 0 |
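The `_LazyModule` registration above defers importing torch-heavy submodules until an attribute is first touched. A self-contained sketch of the same idea (class and names here are illustrative, not the transformers implementation):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported symbols from submodules on first attribute access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)
```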
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
    parser.add_argument(
        '--dataset_name' , type=str , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
    parser.add_argument(
        '--dataset_config' , type=str , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
    parser.add_argument(
        '--tokenizer_name_or_path' , type=str , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
    parser.add_argument(
        '--shard_size' , type=int , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
    parser.add_argument('--split' , type=str , default='train' , choices=['train', 'test', 'validation'] )
    parser.add_argument(
        '--limit' , default=None , type=int , help='Limit the number of shards (used for debugging).' , )
    parser.add_argument(
        '--max_length' , type=int , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.' , )
    parser.add_argument(
        '--output_dir' , default='tf-tpu' , type=str , help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
    return args
def tokenize_function( tokenizer ):
    def fn(examples ):
        return tokenizer(examples['text'] )

    return fn


def get_serialized_examples( tokenized_data ):
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        feature = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def main( args ):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )

    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f'Limiting the dataset to {args.limit} entries.' )

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['text'] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_0_0_0 , num_proc=4 )

    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'] )
        filename = os.path.join(split_dir , f'dataset-{shard_count}-{records_containing}.tfrecord' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('Wrote file {} containing {} records'.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing

    with open(f'split-{args.split}-records-count.txt' , 'w' ) as f:
        print(f'Total {args.split} records: {total_records}' , file=f )
if __name__ == "__main__":
snake_case : str = parse_args()
main(args)
| 364 |
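A toy check of the fixed-length chunking that `group_texts` performs (here with `max_length=4` instead of 512):

```python
examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9]]}
concatenated = sum(examples["input_ids"], [])        # [1, 2, 3, 4, 5, 6, 7, 8, 9]
total_length = (len(concatenated) // 4) * 4          # 8: the remainder is dropped
chunks = [concatenated[i : i + 4] for i in range(0, total_length, 4)]
assert chunks == [[1, 2, 3, 4], [5, 6, 7, 8]]
```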
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs' )

    @classmethod
    def setUpClass( cls ) -> None:
        # Stash any pre-existing default config so the tests run from a clean state.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def tearDownClass( cls ) -> None:
        # Restore the user's original default config.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
    def test_no_config( self ) -> None:
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )

    def test_config_compatibility( self ) -> None:
        for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(config ), self.test_file_path] , env=os.environ.copy() )

    def test_accelerate_test( self ) -> None:
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class TpuConfigTester(unittest.TestCase ):
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'
    def test_base( self ) -> None:
        output = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )

    def test_base_backward_compatibility( self ) -> None:
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )

    def test_with_config_file( self ) -> None:
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=True )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )

    def test_with_config_file_and_command( self ) -> None:
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )

    def test_with_config_file_and_multiple_command( self ) -> None:
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , output , )

    def test_with_config_file_and_command_file( self ) -> None:
        output = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )

    def test_with_config_file_and_command_file_backward_compatibility( self ) -> None:
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )

    def test_accelerate_install( self ) -> None:
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , output , )

    def test_accelerate_install_version( self ) -> None:
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] , return_stdout=True , )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , output , )
| 109 | 0 |
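The assertions above hinge on capturing the launcher's stdout. A minimal stand-in for accelerate's `run_command(..., return_stdout=True)` helper, using only the standard library (a sketch, not the accelerate implementation):

```python
import subprocess

def run_and_capture(cmd: list) -> str:
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout

assert run_and_capture(["echo", "hello"]) == "hello\n"
```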
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_input_output_texts( self , tokenizer ):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id( self ):
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '<unk>' )
        self.assertEqual(vocab_keys[-1] , '▁eloquent' )
        self.assertEqual(len(vocab_keys ) , 3_0000 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )

    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

    def test_full_tokenizer( self ):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁this', '▁is', '▁a', '▁test'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1289] )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )

    def test_sequence_builders( self ):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode('sequence builders' )
        text_2 = tokenizer.encode('multi-sequence build' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration( self ):
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 39 |
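A hedged round-trip sketch with a published ALBERT checkpoint (downloads `albert-base-v2`; requires the `sentencepiece` extra):

```python
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tok.encode("sequence builders")             # [CLS] ... [SEP] are added automatically
print(tok.convert_ids_to_tokens(ids))             # SentencePiece pieces; '▁' marks word starts
print(tok.decode(ids, skip_special_tokens=True))  # -> "sequence builders"
```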
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key( k ):
    """Apply the PATTERNS rename table, left to right, to one TF weight name."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
def convert_pegasus( tf_weights , cfg_updates ):
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path , save_dir ):
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : Any = Path(args.tf_ckpt_path).parent.name
a__ : int = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313 | 0 |
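A toy trace of `rename_state_dict_key` with the `PATTERNS` table above (the input key is invented for illustration):

```python
key = "encoder/layer_0/self_attention/output_proj/kernel"
# 'attention'->'attn', '/'->'.', 'r.layer_'->'r.layers.', 'output_proj'->'out_proj', 'kernel'->'weight'
assert rename_state_dict_key(key) == "encoder.layers.0.self_attn.out_proj.weight"
```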
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system( moles: float , kelvin: float , volume: float ) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system( moles: float , kelvin: float , pressure: float ) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 343 |
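A worked example of the two rearrangements of PV = nRT above (1 mol at 300 K):

```python
p = pressure_of_gas_system(1.0, 300.0, 0.025)   # ≈ 99773.5 Pa for V = 0.025 m³
v = volume_of_gas_system(1.0, 300.0, p)          # recovers V ≈ 0.025 m³
assert abs(v - 0.025) < 1e-9
```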
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location="cpu" )
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(config , indent=2 ) + "\n" )
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 343 | 1 |
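The vocab comprehension in the converter above normalises fastBPE markers into end-of-word tags; a comment-level trace (tokens invented for illustration):

```python
# "hello"  (no '@@', id > 13)  -> "hello</w>"   word-final piece gets the </w> marker
# "hel@@"  (continuation)      -> "hel"         '@@' is stripped, no marker added
# ids 0..13 are special symbols and keep their surface form unchanged
```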
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
RADIUS = 637_8137


def haversine_distance( lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    """Distance in metres between two points on Earth, using the haversine
    formula on latitudes reduced for the ellipsoidal flattening."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 |
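Plugging two real coordinates into the ellipsoid-corrected haversine above (the printed value is approximate):

```python
SAN_FRANCISCO = (37.774856, -122.424227)   # (lat, lon)
NEW_YORK = (40.713019, -74.012647)
d = haversine_distance(SAN_FRANCISCO[0], SAN_FRANCISCO[1], NEW_YORK[0], NEW_YORK[1])
print(f"{d / 1000:.0f} km")   # roughly 4.1-4.2 thousand km
```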
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images
        return encoded_inputs

    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 336 |
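A hedged usage sketch for the processor above (needs Pillow and a LayoutLMv3 checkpoint; `apply_ocr=False` lets us pass words and boxes ourselves instead of requiring pytesseract):

```python
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
image = Image.new("RGB", (224, 224), "white")                  # stand-in document image
words, boxes = ["hello", "world"], [[10, 10, 40, 20], [50, 10, 90, 20]]
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']
```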
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig( PretrainedConfig ):
    model_type = "instructblip_vision_model"

    def __init__( self , hidden_size=14_08 , intermediate_size=61_44 , num_hidden_layers=39 , num_attention_heads=16 , image_size=2_24 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    model_type = "instructblip_qformer"

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=14_08 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig ):
    model_type = "instructblip"
    is_composition = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 336 | 1 |
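Constructing the composite config with all defaults, as a quick check of the wiring above:

```python
config = InstructBlipConfig()                               # OPT text backbone by default
assert config.vision_config.hidden_size == 1408
assert config.qformer_config.encoder_hidden_size == 1408   # tied to the vision width
assert config.num_query_tokens == 32
```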
"""simple docstring"""
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3


def rabin_karp( pattern: str , text: str ) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern , text_1 ) and not rabin_karp(pattern , text_2 )
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern , text )
    pattern = "Lue"
    assert not rabin_karp(pattern , text )
    print("Success." )
if __name__ == "__main__":
test_rabin_karp()
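How the rolling-hash update in `rabin_karp` keeps each window O(1): a two-character window sliding over "abc" (base 256, modulus 1000003):

```python
a, b, c = ord("a"), ord("b"), ord("c")
h_ab = (b + a * 256) % 1000003                 # hash of "ab"; modulus_power is 256 here
h_bc = ((h_ab - a * 256) * 256 + c) % 1000003  # drop 'a', shift, append 'c'
assert h_bc == (c + b * 256) % 1000003         # matches the hash of "bc" computed directly
```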
| 25 | import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping( key , file ):
    """Map a Megatron-DeepSpeed weight name onto the transformers Bloom layout."""
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: the layer number is encoded in the file name,
    # offset by the embedding/norm layers that precede the blocks in the Megatron layout.
    layer_number = int(re.match(r'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return f"""h.{layer_number}.""" + key


def get_dtype_size( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names ):
            print('Processing file: {}'.format(file ) )
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' , f"""model_0{i}""" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
            json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + '\n'
            f.write(json_config )
    else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' , f"""model_0{i}""" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"""The keys {missing_keys} are missing"""
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"""Save configuration file to {pytorch_config_dump_path}""" )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowerCAmelCase__ : Any = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 143 | 0 |
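# For context, a minimal self-contained sketch of the tensor-parallel merge rule used
# in the conversion above: replicated parameters (layer norms, biases) are averaged
# across TP ranks, while partitioned linear weights are concatenated (dim 0 for
# column-parallel, dim 1 for row-parallel). All names below are illustrative
# assumptions, not part of the conversion script itself.
import torch

def merge_tp_shards(shards , average_suffixes=("input_layernorm.weight", "bias") , row_parallel_markers=("dense_4h_to_h",) ):
    merged = {}
    for key in shards[0]:
        if any(key.endswith(suffix ) for suffix in average_suffixes ):
            # replicated parameter: average over ranks
            merged[key] = sum(shard[key] for shard in shards ) / len(shards )
        else:
            # sharded parameter: concatenate along the partitioned dimension
            dim = 1 if any(marker in key for marker in row_parallel_markers ) else 0
            merged[key] = torch.cat([shard[key] for shard in shards] , dim=dim )
    return merged

# tiny usage example with two fake TP ranks
shard_a = {"dense.weight": torch.ones(2 , 4 ), "dense.bias": torch.ones(2 )}
shard_b = {"dense.weight": torch.zeros(2 , 4 ), "dense.bias": torch.zeros(2 )}
merged = merge_tp_shards([shard_a, shard_b] )
assert merged["dense.weight"].shape == (4, 4)
assert torch.allclose(merged["dense.bias"] , torch.full((2,) , 0.5 ) )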
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
@property
def lowerCAmelCase_ ( self ) -> str:
torch.manual_seed(0 )
snake_case_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case_ = self.dummy_uncond_unet
snake_case_ = ScoreSdeVeScheduler()
snake_case_ = ScoreSdeVePipeline(unet=__A , scheduler=__A )
sde_ve.to(__A )
sde_ve.set_progress_bar_config(disable=__A )
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__A ).images
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__A , return_dict=__A )[
0
]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Dict:
snake_case_ = """google/ncsnpp-church-256"""
snake_case_ = UNetaDModel.from_pretrained(__A )
snake_case_ = ScoreSdeVeScheduler.from_pretrained(__A )
snake_case_ = ScoreSdeVePipeline(unet=__A , scheduler=__A )
sde_ve.to(__A )
sde_ve.set_progress_bar_config(disable=__A )
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__A ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 351 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 34 | 0 |
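# For context, a minimal sketch of the lazy-import pattern that _LazyModule implements
# above, based on the module-level __getattr__ hook from PEP 562. This is an
# illustrative reduction meant for a package __init__; the real _LazyModule also
# handles submodules, __dir__, and pickling. The example structure is a placeholder.
import importlib

_example_import_structure = {"configuration_groupvit": ["GroupViTConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _example_import_structure.items() for attr in attrs}

def __getattr__(name ):
    # resolve an attribute to its backing submodule only on first access
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name] , __name__ )
        return getattr(module , name )
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}" )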
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    '''simple docstring'''

    def __init__( self , data : Any ) -> None:
        self.data = data
        self.next : Node | None = None

class CircularLinkedList:
    '''simple docstring'''

    def __init__( self ) -> None:
        self.head : Node | None = None
        self.tail : Node | None = None

    def __iter__( self ) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__( self ) -> int:
        return sum(1 for _ in self )

    def __repr__( self ) -> str:
        return "->".join(str(item ) for item in iter(self ) )

    def insert_tail( self , data : Any ) -> None:
        self.insert_nth(len(self ) , data )

    def insert_head( self , data : Any ) -> None:
        self.insert_nth(0 , data )

    def insert_nth( self , index : int , data : Any ) -> None:
        if index < 0 or index > len(self ):
            raise IndexError('''list index out of range.''' )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node # first node points itself
            self.tail = self.head = new_node
        elif index == 0: # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1: # insert at tail
                self.tail = new_node

    def delete_front( self ) -> Any:
        return self.delete_nth(0 )

    def delete_tail( self ) -> Any:
        return self.delete_nth(len(self ) - 1 )

    def delete_nth( self , index : int = 0 ) -> Any:
        if not 0 <= index < len(self ):
            raise IndexError('''list index out of range.''' )
        delete_node = self.head
        if self.head == self.tail: # just one node
            self.head = self.tail = None
        elif index == 0: # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1: # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty( self ) -> bool:
        return len(self ) == 0

def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError # This should not happen
    except IndexError:
        assert True # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError # This should not happen
    except IndexError:
        assert True # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
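# Quick usage of the restored list above, mirroring in miniature what
# test_circular_linked_list() asserts:
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value )
cll.insert_head(0 ) # 0->1->2->3
assert str(cll ) == "0->1->2->3"
assert cll.delete_front() == 0
assert cll.delete_tail() == 3
assert len(cll ) == 2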
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : int = logging.getLogger(__name__)
a_ : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a_ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A__ )} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
_lowercase : bool = field(default=A__ , metadata={'''help''': '''Whether or not to use whole word mask.'''} )
_lowercase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowercase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
_lowercase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
_lowercase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
def _dataset(_UpperCAmelCase , _UpperCAmelCase=None):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask')
return LineByLineWithRefDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , ref_path=_UpperCAmelCase , )
return LineByLineTextDataset(tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size)
else:
return TextDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_UpperCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file)
elif args.train_data_files:
return ConcatDataset([_dataset(_UpperCAmelCase) for f in glob(args.train_data_files)])
else:
return _dataset(args.train_data_file , args.train_ref_file)
def lowerCamelCase__ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.')
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.')
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it, and load it from here, using --tokenizer_name')
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch')
SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_config(_UpperCAmelCase)
model.resize_token_embeddings(len(_UpperCAmelCase))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).')
if data_args.block_size <= 0:
SCREAMING_SNAKE_CASE = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
SCREAMING_SNAKE_CASE = min(data_args.block_size , tokenizer.max_len)
# Get datasets
SCREAMING_SNAKE_CASE = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , cache_dir=model_args.cache_dir) if training_args.do_train else None
)
SCREAMING_SNAKE_CASE = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , evaluate=_UpperCAmelCase , cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
SCREAMING_SNAKE_CASE = DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(
tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability)
else:
SCREAMING_SNAKE_CASE = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_collator=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , prediction_loss_only=_UpperCAmelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=_UpperCAmelCase)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info('*** Evaluate ***')
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = math.exp(eval_output['eval_loss'])
SCREAMING_SNAKE_CASE = {'perplexity': perplexity}
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , 'eval_results_lm.txt')
if trainer.is_world_master():
with open(_UpperCAmelCase , 'w') as writer:
logger.info('***** Eval results *****')
for key in sorted(result.keys()):
logger.info(' %s = %s' , _UpperCAmelCase , str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
results.update(_UpperCAmelCase)
return results
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 137 | 0 |
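# For context, a small standalone sketch of what the MLM collator wired up above
# produces: labels start as a copy of input_ids, ~mlm_probability of the input
# tokens get masked, and unmasked label positions are set to -100 so the loss
# ignores them. The checkpoint name is an illustrative choice and this needs
# network access to download the tokenizer.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

mlm_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
mlm_collator = DataCollatorForLanguageModeling(tokenizer=mlm_tokenizer, mlm=True, mlm_probability=0.15)
mlm_batch = mlm_collator([mlm_tokenizer('the quick brown fox jumps over the lazy dog')])
# positions left at -100 in the labels contribute nothing to the masked-LM loss
print(mlm_batch['input_ids'].shape, int((mlm_batch['labels'] != -100).sum()))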
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 209 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowercase__ :
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple=13 , snake_case__ : str=7 , snake_case__ : Union[str, Any]=6 , snake_case__ : str=17 , snake_case__ : Any=23 , snake_case__ : int=11 , snake_case__ : Tuple=True , ):
lowerCamelCase_ : str =parent
lowerCamelCase_ : Union[str, Any] =batch_size
lowerCamelCase_ : List[Any] =seq_length
lowerCamelCase_ : Union[str, Any] =act_dim
lowerCamelCase_ : Optional[Any] =state_dim
lowerCamelCase_ : Optional[Any] =hidden_size
lowerCamelCase_ : Tuple =max_length
lowerCamelCase_ : List[Any] =is_training
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowerCamelCase_ : List[Any] =floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCamelCase_ : List[Any] =ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowerCamelCase_ : Optional[int] =random_attention_mask((self.batch_size, self.seq_length) )
lowerCamelCase_ : List[str] =self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase__ ( self : Any ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , ):
lowerCamelCase_ : Tuple =DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : str =model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def UpperCAmelCase__ ( self : List[str] ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    states,
    actions,
    rewards,
    returns_to_go,
    timesteps,
    attention_mask,
) = config_and_inputs
inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( snake_case__, snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
_UpperCAmelCase :int = ()
_UpperCAmelCase :int = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_UpperCAmelCase :Union[str, Any] = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :Dict = False
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :int = False
_UpperCAmelCase :str = False
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Dict =DecisionTransformerModelTester(self )
lowerCamelCase_ : str =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def UpperCAmelCase__ ( self : List[str] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : str =DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : List[Any] =model_class(snake_case__ )
lowerCamelCase_ : int =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : List[Any] =[*signature.parameters.keys()]
lowerCamelCase_ : List[str] =[
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Any ):
NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform
TARGET_RETURN = 10 # defined by the RL environment, may be normalized
model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
model = model.to(torch_device )
config = model.config
torch.manual_seed(0 )
state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ) # env.reset()
expected_outputs = torch.tensor(
    [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=torch_device )
returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
states = state
actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
for step in range(NUM_STEPS ):
    actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
    rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
    attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
    with torch.no_grad():
        state_pred , action_pred , return_pred = model(
            states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
    self.assertEqual(action_pred.shape , actions.shape )
    self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
    state , reward , done , _ = ( # env.step(action)
        torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
        1.0,
        False,
        {},
    )
    actions[-1] = action_pred[0, -1]
    states = torch.cat([states, state] , dim=1 )
    pred_return = returns_to_go[0, -1] - reward
    returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
    timesteps = torch.cat(
        [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
| 209 | 1 |
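# The @slow test above follows the generic decision-transformer rollout: pad the
# action history with a zero placeholder, predict, then append the new state and
# the decremented return-to-go. A compressed, reusable sketch of the prediction
# step (an illustrative helper under those assumptions, not part of the suite):
import torch

def dt_predict_action(model , states , actions , rewards , returns_to_go , timesteps , act_dim ):
    actions = torch.cat([actions, torch.zeros(1 , 1 , act_dim , device=states.device )] , dim=1 )
    rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=states.device )] , dim=1 )
    attention_mask = torch.ones(1 , states.shape[1] , dtype=torch.long , device=states.device )
    with torch.no_grad():
        _state_preds , action_preds , _return_preds = model(
            states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go ,
            timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
    return action_preds[0, -1] # the action predicted for the newest timestep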
'''simple docstring'''
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state ):
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )

def test_gather(state ):
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )

def test_gather_object(state ):
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, f'{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes ) ), f'{gathered_obj} != {list(range(state.num_processes ) )}'

def test_broadcast(state ):
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )

def test_pad_across_processes(state ):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]

def test_reduce_sum(state ):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , 'sum' )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'{reduced_tensor} != {truth_tensor}'

def test_reduce_mean(state ):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , 'mean' )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'{reduced_tensor} != {truth_tensor}'

def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()

def main():
    state = PartialState()
    state.print(f'State: {state}' )
    state.print('testing gather' )
    test_gather(state )
    state.print('testing gather_object' )
    test_gather_object(state )
    state.print('testing broadcast' )
    test_broadcast(state )
    state.print('testing pad_across_processes' )
    test_pad_across_processes(state )
    state.print('testing reduce_sum' )
    test_reduce_sum(state )
    state.print('testing reduce_mean' )
    test_reduce_mean(state )

if __name__ == "__main__":
main() | 190 |
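# A single-process arithmetic check of what the collectives above assert for
# num_processes = 2: rank 0 holds [1., 2.] and rank 1 holds [3., 4.] (exactly what
# create_tensor builds), gather concatenates them, and reduce combines them
# elementwise. Plain torch only; no distributed launch required.
import torch

def simulate_collectives(num_processes=2 ):
    ranks = [torch.arange(num_processes ) + 1.0 + num_processes * r for r in range(num_processes )]
    gathered = torch.cat(ranks )                    # what gather() returns on every rank
    reduced_sum = torch.stack(ranks ).sum(dim=0 )   # reduce(..., 'sum')
    reduced_mean = torch.stack(ranks ).mean(dim=0 ) # reduce(..., 'mean')
    return gathered, reduced_sum, reduced_mean

g, s, m = simulate_collectives()
assert g.tolist() == [1.0, 2.0, 3.0, 4.0]
assert s.tolist() == [4.0, 6.0]
assert m.tolist() == [2.0, 3.0]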
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    bwt_string: str
    idx_original_string: int
def all_rotations( s : str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform( s : str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt( bwt_string : str , idx_original_string : int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ castable to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
entry_msg = '''Provide a string that I will generate its BWT transform: '''
s = input(entry_msg).strip()
result = bwt_transform(s)
print(
F"Burrows Wheeler transform for string '{s}' results "
F"in '{result['bwt_string']}'"
)
original_string = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
F"we get original string '{original_string}'"
)
| 276 | 0 |
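# Round-trip check for the pair above: the BWT of "banana" is "nnbaaa" with the
# original string at sorted-rotation index 3, and reverse_bwt recovers it.
result = bwt_transform("banana" )
assert result["bwt_string"] == "nnbaaa"
assert result["idx_original_string"] == 3
assert reverse_bwt(result["bwt_string"] , result["idx_original_string"] ) == "banana"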
"""simple docstring"""
def binary_multiply(a : int , b : int ) -> int:
    """simple docstring"""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a : int , b : int , c : int ) -> int:
    """simple docstring"""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 356 |
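# Worked check of the shift-and-add scheme above: b = 5 = 0b101 selects the
# doublings 6 and 24, so 6 * 5 = 6 + 24 = 30; the modular variant reduces after
# every addition and agrees with (6 * 5) % 7 = 2.
assert binary_multiply(6 , 5 ) == 30
assert binary_mod_multiply(6 , 5 , 7 ) == (6 * 5) % 7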
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__A = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__A = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :List[str] = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
lowerCAmelCase__ :List[str] = True
# Deal with multi-line cases
elif (
re.search(
rF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _SCREAMING_SNAKE_CASE , )
is not None
):
lowerCAmelCase__ :int = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowerCAmelCase__ :Any = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowerCAmelCase__ :Union[str, Any] = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
lowerCAmelCase__ :Union[str, Any] = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
lowerCAmelCase__ :Any = True
if not attribute_used:
lowerCAmelCase__ :List[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowerCAmelCase__ :List[str] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowerCAmelCase__ :Tuple = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowerCAmelCase__ :Optional[Any] = True
elif attribute.endswith('_token_id' ):
lowerCAmelCase__ :List[Any] = True
# configuration class specific cases
if not case_allowed:
lowerCAmelCase__ :List[str] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowerCAmelCase__ :List[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def __A (_SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ :List[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
lowerCAmelCase__ :List[Any] = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
lowerCAmelCase__ :List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowerCAmelCase__ :Optional[Any] = {}
if len(config_class.attribute_map ) > 0:
lowerCAmelCase__ :Optional[int] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowerCAmelCase__ :str = inspect.getsourcefile(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = os.path.dirname(_SCREAMING_SNAKE_CASE )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowerCAmelCase__ :Dict = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for fn in os.listdir(_SCREAMING_SNAKE_CASE ) if fn.startswith('modeling_' )]
# Get the source code strings
lowerCAmelCase__ :Tuple = []
for path in modeling_paths:
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE ) as fp:
modeling_sources.append(fp.read() )
lowerCAmelCase__ :Any = []
for config_param, default_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# `attributes` here is all the variant names for `config_param`
lowerCAmelCase__ :Optional[int] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
unused_attributes.append(attributes[0] )
return sorted(_SCREAMING_SNAKE_CASE )
def __A () ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowerCAmelCase__ :List[str] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _SCREAMING_SNAKE_CASE : inspect.isclass(_SCREAMING_SNAKE_CASE )
and issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and inspect.getmodule(_SCREAMING_SNAKE_CASE ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowerCAmelCase__ :Union[str, Any] = check_config_attributes_being_used(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase__ :int = unused_attributes
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase__ :Any = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
check_config_attributes()
| 254 | 0 |
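# A reduced illustration of the usage test above: an attribute counts as used if
# the modeling source contains config.<attr> or a getattr(config, "<attr>", ...)
# call, with the regex catching getattr spellings broken across lines. The source
# string and attribute below are purely illustrative.
import re

example_source = 'hidden = getattr(\n    config, "hidden_size", 64\n)'
example_attribute = "hidden_size"
attribute_is_used = (
    f"config.{example_attribute}" in example_source
    or re.search(
        rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{example_attribute}\"" , example_source )
    is not None
)
assert attribute_is_used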
def lowerCAmelCase__( separator : str , separated : list[str] ) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception("join() accepts only strings to be joined" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 326 |
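# Small usage check for the join above (lowerCAmelCase__ is the snippet's
# placeholder name): the loop appends a trailing separator which strip() removes.
assert lowerCAmelCase__("-" , ["a", "b", "c"] ) == "a-b-c"
assert lowerCAmelCase__(" " , ["You", "are", "amazing!"] ) == "You are amazing!"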
def lowerCAmelCase__( arr : list[int] , required_sum : int ) -> bool:
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
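# Usage check for the DP above (lowerCAmelCase__ is the snippet's placeholder
# name): {3, 34, 4, 12, 5, 2} contains 4 + 5 = 9, but no subset sums to 30
# (everything without 34 tops out at 26, and any subset with 34 overshoots).
assert lowerCAmelCase__([3, 34, 4, 12, 5, 2] , 9 ) is True
assert lowerCAmelCase__([3, 34, 4, 12, 5, 2] , 30 ) is False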
'''simple docstring'''
import socket
def main() -> None:
    '''simple docstring'''
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
if __name__ == "__main__":
main() | 370 |
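# For context, a minimal matching sender sketched as an illustrative counterpart
# (not part of the original snippet): it binds the same port, accepts one
# connection, streams a placeholder file back in 1024-byte chunks, and closes so
# the client's recv() returns b"" and its loop ends. "mytext.txt" is assumed.
import socket

def serve_file(filename="mytext.txt" , port=12312 ):
    server = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    server.bind((socket.gethostname() , port) )
    server.listen(1 )
    conn , _addr = server.accept()
    conn.recv(1024 ) # consume the client's greeting
    with open(filename , "rb" ) as in_file:
        while chunk := in_file.read(1024 ):
            conn.send(chunk )
    conn.close()
    server.close()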
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 16
__snake_case = 32
def a ( __a , __a = 16 , __a = "bert-base-cased" ) -> Any:
'''simple docstring'''
UpperCamelCase__ :List[str] = AutoTokenizer.from_pretrained(__a )
UpperCamelCase__ :List[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ :Optional[int] = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ :Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCamelCase__ :Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
UpperCamelCase__ :str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
def a ( __a , __a , __a , __a ) -> str:
'''simple docstring'''
model.eval()
UpperCamelCase__ :List[str] = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ :int = model(**__a )
UpperCamelCase__ :Tuple = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase__ , UpperCamelCase__ :int = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__a ) - 1:
UpperCamelCase__ :Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase__ :List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__a , references=__a , )
UpperCamelCase__ :Union[str, Any] = metric.compute()
return eval_metric["accuracy"]
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Any = config['''lr''']
UpperCamelCase__ :Optional[int] = int(config['''num_epochs'''] )
UpperCamelCase__ :List[Any] = int(config['''seed'''] )
UpperCamelCase__ :List[Any] = int(config['''batch_size'''] )
UpperCamelCase__ :List[Any] = args.model_name_or_path
set_seed(__a )
UpperCamelCase__ , UpperCamelCase__ :Any = get_dataloaders(__a , __a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a )
# Instantiate optimizer
UpperCamelCase__ :Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase__ :Optional[Any] = optimizer_cls(params=model.parameters() , lr=__a )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase__ :Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCamelCase__ :Dict = 1
UpperCamelCase__ :Tuple = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase__ :Any = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , )
else:
UpperCamelCase__ :Any = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = accelerator.prepare(
__a , __a , __a , __a , __a )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ :Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
UpperCamelCase__ :List[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase__ :Optional[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase__ :Dict = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCamelCase__ :Tuple = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase__ :Any = int(__a ) + 1
UpperCamelCase__ :Dict = evaluation_loop(__a , __a , __a , __a )
accelerator.print('''resumed checkpoint performance:''' , __a )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.load(__a )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase__ :Optional[Any] = {}
for epoch in range(__a , __a ):
model.train()
for step, batch in enumerate(__a ):
UpperCamelCase__ :Optional[int] = model(**__a )
UpperCamelCase__ :Optional[int] = outputs.loss
UpperCamelCase__ :str = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase__ :Union[str, Any] = f'''epoch_{epoch}'''
UpperCamelCase__ :List[Any] = os.path.join(args.output_dir , __a )
accelerator.save_state(__a )
UpperCamelCase__ :List[Any] = evaluation_loop(__a , __a , __a , __a )
UpperCamelCase__ :int = accuracy
UpperCamelCase__ :List[Any] = lr_scheduler.get_lr()[0]
UpperCamelCase__ :Any = optimizer.param_groups[0]['''lr''']
UpperCamelCase__ :int = epoch
UpperCamelCase__ :Tuple = overall_step
accelerator.print(f'''epoch {epoch}:''' , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(__a , __a )
def a ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=__a , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__a , )
parser.add_argument(
'''--output_dir''' , type=__a , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=__a , default=__a , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=__a , default=__a , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=__a , default=2 , help='''Number of train epochs.''' , )
UpperCamelCase__ :Optional[int] = parser.parse_args()
UpperCamelCase__ :List[str] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main() | 219 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _A : Union[dict, list, tuple, torch.Tensor] ):
"""simple docstring"""
lowerCamelCase__ : Dict = []
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(__lowerCAmelCase ) )
elif isinstance(__lowerCAmelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__lowerCAmelCase ) )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def lowercase_ ( _A : int , _A : Tuple[int, ...] ):
"""simple docstring"""
lowerCamelCase__ : List[Any] = []
for d in reversed(__lowerCAmelCase ):
idx.append(flat_idx % d )
lowerCamelCase__ : Tuple = flat_idx // d
return tuple(reversed(__lowerCAmelCase ) )
@torch.jit.ignore
def lowercase_ ( _A : Sequence[int] , _A : Sequence[int] , _A : Sequence[int] , _A : Optional[Sequence[bool]] = None , _A : Optional[Sequence[bool]] = None , ):
"""simple docstring"""
def reduce_edge_list(l : List[bool] ) -> None:
    tally = True
    for i in range(len(l ) ):
        reversed_idx = -1 * (i + 1)
        l[reversed_idx] &= tally
        tally = l[reversed_idx]
if start_edges is None:
lowerCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(__lowerCAmelCase )
if end_edges is None:
lowerCamelCase__ : Optional[int] = [e == (d - 1) for e, d in zip(__lowerCAmelCase , __lowerCAmelCase )]
reduce_edge_list(__lowerCAmelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__lowerCAmelCase ) == 0:
return [()]
elif len(__lowerCAmelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowerCamelCase__ : Dict = []
lowerCamelCase__ : int = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__lowerCAmelCase , __lowerCAmelCase ):
if s == e:
path_list.append(slice(__lowerCAmelCase , s + 1 ) )
else:
break
lowerCamelCase__ : Union[str, Any] = tuple(__lowerCAmelCase )
lowerCamelCase__ : int = len(__lowerCAmelCase )
# start == end, and we're done
if divergence_idx == len(__lowerCAmelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase__ : List[Any] = start[divergence_idx]
return tuple(
path + (slice(__lowerCAmelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase__ : Any = end[divergence_idx]
return tuple(
path + (slice(__lowerCAmelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowerCamelCase__ : Union[str, Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
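# Example (illustrative): _get_minimal_slice_set([0, 0], [0, 2], [2, 3]) returns
# [(slice(0, 1), slice(0, 3))], i.e. row 0, columns 0..2 inclusive, in one slice.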
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """
    Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end],
    but without a potentially memory-intensive reshape of the batch dimensions.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive-inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
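# Example (illustrative): for t of shape (2, 3, 5) and no_batch_dims=2,
# _chunk_slice(t, flat_start=1, flat_end=4, no_batch_dims=2) yields a (3, 5)
# tensor covering flattened batch entries 1, 2 and 3.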
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Run `layer` over `inputs` in chunks of `chunk_size` along the flattened batch
    dimensions, reassembling the per-chunk outputs into full-batch tensors.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
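# Hedged usage sketch (added for illustration; not part of the original module):
# apply a dict-returning layer over a flattened batch in chunks of 4 and check
# that the result matches the unchunked call.
def _example_chunk_layer_usage() -> None:
    linear = torch.nn.Linear(8, 8)
    x = torch.randn(16, 8)
    chunked = chunk_layer(lambda sample: {"out": linear(sample)}, {"sample": x}, chunk_size=4, no_batch_dims=1)
    assert torch.allclose(chunked["out"], linear(x), atol=1e-6)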
class ChunkSizeTuner:
    """Binary-searches the largest chunk size that runs without error for a given fn/args pair."""

    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
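# Hedged usage sketch (argument values illustrative): keep one tuner alive across
# calls so the binary search only reruns when the input shapes change.
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(fn, args=(x,), min_chunk_size=16)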
| 184 |
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape IMDb's Top 250 chart into a {title: rating} mapping."""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
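# Shape of the returned mapping (ratings are illustrative and change over time):
#   {"The Shawshank Redemption": 9.2, "The Godfather": 9.2, ...}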
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """Write the scraped Top 250 chart to a CSV file."""
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 231 | 0 |
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring using Manacher's algorithm, in O(n)."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_input_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the start and end of the previously furthest-ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to its last index
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
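# Illustrative check: the longest palindromic substring of "abacabad" is "abacaba".
#   assert palindromic_string("abacabad") == "abacaba"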
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
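# Illustrative launch command (script filename assumed; both flags are defined in `main()` below):
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8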
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 103 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
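# Illustrative invocation (test path assumed):
#   python -m pytest -k VQModelTests tests/models/test_models_vq.py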
| 51 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
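# The downloaded batch is a dict of tensors keyed like the model inputs used in the
# tests below: "past_values", "past_time_features", "past_observed_mask",
# "static_categorical_features", and (for the train batch) "future_values"/"future_time_features".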
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 301 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
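# Example (illustrative): random_input_ids(2, 4, 100) returns a (2, 4) int32
# tensor of token ids drawn uniformly from [0, 100).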
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)
    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)
    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)
    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
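# Hedged usage sketch (checkpoint id illustrative):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt", padding=True)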
| 26 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline, carrying the rendered images."""

    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer, )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
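    # Note (added): with classifier-free guidance enabled, `_encode_image` returns
    # embeddings whose batch dimension is doubled (unconditional + conditional), which
    # is why the denoising loop below concatenates two copies of the latents.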
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 25 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ) -> Optional[Any]:
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
UpperCAmelCase : str = image.shape[0]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
F"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_SCREAMING_SNAKE_CASE )}" )
UpperCAmelCase : str = self._execution_device
UpperCAmelCase : List[Any] = batch_size * num_images_per_prompt
UpperCAmelCase : List[Any] = guidance_scale > 1.0
UpperCAmelCase : Dict = self._encode_image(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# prior
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = self.scheduler.timesteps
UpperCAmelCase : Optional[int] = self.prior.config.num_embeddings
UpperCAmelCase : int = self.prior.config.embedding_dim
UpperCAmelCase : Tuple = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase : List[Any] = latents.reshape(latents.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = self.prior(
_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , proj_embedding=_SCREAMING_SNAKE_CASE , ).predicted_image_embedding
# remove the variance
UpperCAmelCase , UpperCAmelCase : Dict = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Dict = noise_pred.chunk(2 )
UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase : Optional[Any] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = []
for i, latent in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : int = self.renderer.decode(
latent[None, :] , _SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = torch.stack(_SCREAMING_SNAKE_CASE )
if output_type not in ["np", "pil"]:
raise ValueError(F"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase : List[Any] = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase : Tuple = [self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 109 |
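# Hedged usage sketch for the image-to-image Shap-E pipeline above (illustrative
# addition): the checkpoint id and the input image path are assumptions, not taken
# from this excerpt.
import torch
from PIL import Image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16).to("cuda")
image = Image.open("input.png").convert("RGB")
images = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="pil").images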
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The original attribute wiring was lost; this rebuilds a plausible nine-node tree.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 109 | 1 |
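# Quick sanity check for the tree helpers above (illustrative addition): a
# three-node tree whose root has both children is full and has depth 2.
root = Node(10)
root.left = Node(20)
root.right = Node(30)
assert is_full_binary_tree(root)
assert depth_of_tree(root) == 2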
"""simple docstring"""
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F'fifty-two card deck is: {combinations(5_2, 5)}\n',
)
print(
"""If a class of 40 students must be arranged into groups of""",
F'4 for group projects, there are {combinations(4_0, 4)} ways',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F'are {combinations(1_0, 3)} ways that first, second and',
"""third place can be awarded.""",
) | 362 |
"""simple docstring"""
def solution(n: int = 2_00_00_00) -> int:
    """Return the sum of all primes below n (sieve of Eratosthenes)."""
    primality_list = [0 for i in range(n + 1)]  # 0 = still prime, 1 = marked composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }') | 215 | 0 |
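# Small sanity check for solution() above (illustrative addition): the primes
# below 10 are 2, 3, 5 and 7, so their sum is 17.
assert solution(10) == 17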
'''simple docstring'''
def get_demo_graph(index: int) -> dict:  # four small undirected test graphs
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict) -> list:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 |
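# Illustrative usage of the bridge finder above on the first demo graph: removing
# any of the edges (2, 3), (2, 5) or (3, 4) disconnects it, so those are the bridges.
found_bridges = compute_bridges(get_demo_graph(0))
assert sorted(found_bridges) == [(2, 3), (2, 5), (3, 4)]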
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 109 | 0 |
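# Minimal sketch of the subcommand pattern used above (illustrative addition, not
# part of accelerate): each *_command_parser registers a subparser and attaches its
# handler as `func` via set_defaults, which is why main() dispatches through
# args.func(args).
from argparse import ArgumentParser

def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser

demo_parser = ArgumentParser("demo", usage="demo <command> [<args>]")
hello_command_parser(demo_parser.add_subparsers(help="demo command helpers"))
demo_args = demo_parser.parse_args(["hello", "--name", "accelerate"])
demo_args.func(demo_args)  # prints "hello accelerate"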
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=24 , A_=24 , A_=0.0 , A_=16000 , A_=True , A_=True , ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Optional[Any] = min_seq_length
_UpperCAmelCase : Tuple = max_seq_length
_UpperCAmelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : Dict = feature_size
_UpperCAmelCase : str = num_mel_bins
_UpperCAmelCase : Union[str, Any] = padding_value
_UpperCAmelCase : Optional[int] = sampling_rate
_UpperCAmelCase : Optional[Any] = return_attention_mask
_UpperCAmelCase : List[str] = do_normalize
def _UpperCAmelCase ( self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _UpperCAmelCase ( self , A_=False , A_=False ):
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*_A ) )
if equal_length:
_UpperCAmelCase : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase : Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase : Optional[int] = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1e-3 ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Union[str, Any] = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase : Optional[int] = feature_extractor(_A , padding=_A , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase : int = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase : Any = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test batched
_UpperCAmelCase : Optional[int] = feature_extractor(_A , return_tensors="np" ).input_features
_UpperCAmelCase : int = feature_extractor(_A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase : Union[str, Any] = np.asarray(_A )
_UpperCAmelCase : Optional[Any] = feature_extractor(_A , return_tensors="np" ).input_features
_UpperCAmelCase : Tuple = feature_extractor(_A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Union[str, Any] = ['longest', 'max_length', 'do_not_pad']
_UpperCAmelCase : Union[str, Any] = [None, 16, None]
for max_length, padding in zip(_A , _A ):
_UpperCAmelCase : str = feature_extractor(
_A , padding=_A , max_length=_A , return_attention_mask=_A )
_UpperCAmelCase : Dict = inputs.input_features
_UpperCAmelCase : Dict = inputs.attention_mask
_UpperCAmelCase : List[Any] = [np.sum(_A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : List[Any] = ['longest', 'max_length', 'do_not_pad']
_UpperCAmelCase : List[str] = [None, 16, None]
for max_length, padding in zip(_A , _A ):
_UpperCAmelCase : Dict = feature_extractor(
_A , max_length=_A , padding=_A , return_tensors="np" , return_attention_mask=_A )
_UpperCAmelCase : Dict = inputs.input_features
_UpperCAmelCase : Optional[Any] = inputs.attention_mask
_UpperCAmelCase : List[str] = [np.sum(_A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : str = feature_extractor(
_A , padding="max_length" , max_length=4 , truncation=_A , return_tensors="np" , return_attention_mask=_A , )
_UpperCAmelCase : Union[str, Any] = inputs.input_features
_UpperCAmelCase : str = inputs.attention_mask
_UpperCAmelCase : str = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Optional[Any] = feature_extractor(
_A , padding="longest" , max_length=4 , truncation=_A , return_tensors="np" , return_attention_mask=_A , )
_UpperCAmelCase : Union[str, Any] = inputs.input_features
_UpperCAmelCase : Optional[Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Tuple = feature_extractor(
_A , padding="longest" , max_length=16 , truncation=_A , return_tensors="np" , return_attention_mask=_A , )
_UpperCAmelCase : Tuple = inputs.input_features
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : int = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
from datasets import load_dataset
_UpperCAmelCase : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase : int = ds.sort("id" ).select(range(_A ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Any = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
# fmt: on
_UpperCAmelCase : Optional[int] = self._load_datasamples(1 )
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : str = feature_extractor(_A , return_tensors="pt" ).input_features
self.assertEquals(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _A , atol=1e-4 ) )
| 357 |
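# The assertions above repeatedly call _check_zero_mean_unit_variance. A minimal
# sketch of the per-utterance normalization being verified (illustrative addition,
# not the library's exact implementation):
import numpy as np

def utterance_cmvn(features):
    # Normalize each feature dimension to zero mean and unit variance over time.
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / np.maximum(std, 1e-10)

feats = np.random.rand(100, 24).astype(np.float32)
normed = utterance_cmvn(feats)
assert abs(normed.mean(axis=0)).max() < 1e-3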
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple:
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):  # name restored from the example docstring above
def __init__( self , A_ , A_ , A_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
_UpperCAmelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if latents is None:
_UpperCAmelCase : Any = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_UpperCAmelCase : Optional[int] = latents.to(A_ )
_UpperCAmelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_UpperCAmelCase : str = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Dict = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
_UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self , A_ , A_ , A_ , A_ = 512 , A_ = 512 , A_ = 100 , A_ = 4.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , ):
'''simple docstring'''
_UpperCAmelCase : str = self._execution_device
_UpperCAmelCase : Tuple = guidance_scale > 1.0
if isinstance(A_ , A_ ):
_UpperCAmelCase : Union[str, Any] = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Dict = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Any = torch.cat(A_ , dim=0 )
_UpperCAmelCase : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Tuple = hint.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
_UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
_UpperCAmelCase : Dict = self.scheduler.timesteps
_UpperCAmelCase : Union[str, Any] = self.movq.config.latent_channels
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Optional[Any] = {"image_embeds": image_embeds, "hint": hint}
_UpperCAmelCase : Optional[int] = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = variance_pred.chunk(2 )
_UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
_UpperCAmelCase : Optional[Any] = self.movq.decode(A_ , force_not_quantize=A_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Union[str, Any] = image * 0.5 + 0.5
_UpperCAmelCase : Dict = image.clamp(0 , 1 )
_UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 189 | 0 |
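# Quick check of downscale_height_and_width above (illustrative addition): with the
# default scale_factor of 8 it rounds the requested size up to a whole number of
# 8x8 latent blocks, so 768 maps to 96 and a non-multiple like 100 rounds up to 16.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(100, 100) == (16, 16)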
UNIVERSAL_GAS_CONSTANT = 8.31_44_62  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Pressure from the ideal gas law, P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Volume from the ideal gas law, V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
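# Worked example (illustrative addition): one mole of an ideal gas at 273.15 K in
# 0.0224 m^3 sits near standard atmospheric pressure, since P = nRT / V.
print(pressure_of_gas_system(1.0, 273.15, 0.0224))  # ~1.01e5 Pa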
| 343 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False  # flag name reconstructed; the original attribute name was lost
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCamelCase_ )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase = PriorTransformer(**lowerCamelCase_ )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase = ShapERenderer(**lowerCamelCase_ )
return model
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_prior
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = self.dummy_tokenizer
UpperCamelCase = self.dummy_renderer
UpperCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , )
UpperCamelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCamelCase = output.images[0]
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch_device == """cpu"""
UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase = batch_size * [inputs[key]]
UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
UpperCamelCase = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCamelCase = pipe(
"""a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 343 | 1 |
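# The tests above build a fresh, seeded torch.Generator for every call so that
# runs are reproducible. A minimal illustration of that property (illustrative
# addition):
import torch

gen = torch.Generator(device="cpu").manual_seed(0)
a = torch.randn(2, generator=gen)
gen = torch.Generator(device="cpu").manual_seed(0)
b = torch.randn(2, generator=gen)
assert torch.equal(a, b)  # same seed and device -> identical draws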
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):  # helper and class names reconstructed from accelerate's test utilities
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 16 |
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
_UpperCamelCase = 5
_UpperCamelCase = ['''A''', '''B''', '''C''', '''D''', '''E''']
_UpperCamelCase = [1, 2, 3, 4, 5]
_UpperCamelCase = [1, 2, 3, 4, 5]
_UpperCamelCase = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_UpperCamelCase = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
| 16 | 1 |
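# Worked response-ratio example (illustrative addition): HRRN picks the ready
# process with the highest (waiting_time + burst_time) / burst_time. A job that
# has waited 6 units with burst 2 has ratio (6 + 2) / 2 = 4.0 and beats a job
# that has waited 3 units with burst 3, ratio (3 + 3) / 3 = 2.0.
def response_ratio(waiting_time: int, burst_time: int) -> float:
    return (waiting_time + burst_time) / burst_time

assert response_ratio(6, 2) > response_ratio(3, 3)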
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = ["""image_processor""", """tokenizer"""]
UpperCamelCase = """LayoutLMv3ImageProcessor"""
UpperCamelCase = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
def __init__( self : Optional[Any], __A : Any=None, __A : Dict=None, **__A : Dict ):
UpperCAmelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', __A, )
UpperCAmelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
UpperCAmelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__A, __A )
def __call__( self : Optional[Any], __A : str, __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, __A : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, __A : Union[List[List[int]], List[List[List[int]]]] = None, __A : Optional[Union[List[int], List[List[int]]]] = None, __A : bool = True, __A : Union[bool, str, PaddingStrategy] = False, __A : Union[bool, str, TruncationStrategy] = None, __A : Optional[int] = None, __A : int = 0, __A : Optional[int] = None, __A : Optional[bool] = None, __A : Optional[bool] = None, __A : bool = False, __A : bool = False, __A : bool = False, __A : bool = False, __A : bool = True, __A : Optional[Union[str, TensorType]] = None, **__A : Tuple, ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
UpperCAmelCase : int = self.image_processor(images=__A, return_tensors=__A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__A, __A ):
UpperCAmelCase : Any = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase : List[str] = features['''words''']
UpperCAmelCase : Union[str, Any] = self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=__A, add_special_tokens=__A, padding=__A, truncation=__A, max_length=__A, stride=__A, pad_to_multiple_of=__A, return_token_type_ids=__A, return_attention_mask=__A, return_overflowing_tokens=__A, return_special_tokens_mask=__A, return_offsets_mapping=__A, return_length=__A, verbose=__A, return_tensors=__A, **__A, )
# add pixel values
UpperCAmelCase : List[Any] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase : Tuple = self.get_overflowing_images(__A, encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase : int = images
return encoded_inputs
def __magic_name__ ( self : str, __A : List[Any], __A : Any ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F''' {len(__A )} and {len(__A )}''' )
return images_with_overflow
def __magic_name__ ( self : Any, *__A : List[Any], **__A : Tuple ):
return self.tokenizer.batch_decode(*__A, **__A )
def __magic_name__ ( self : Any, *__A : Optional[int], **__A : int ):
return self.tokenizer.decode(*__A, **__A )
@property
def __magic_name__ ( self : List[Any] ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __magic_name__ ( self : Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', __A, )
return self.image_processor_class
@property
def __magic_name__ ( self : str ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', __A, )
return self.image_processor
| 336 |
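# Hedged usage sketch for the processor above. The checkpoint name
# ("microsoft/layoutlmv3-base") and the image path are assumptions, not taken from
# this excerpt; with apply_ocr enabled the image processor runs OCR itself, so
# words and boxes must not be passed in.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
page = Image.open("document.png").convert("RGB")
encoding = processor(page, return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values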
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_lowerCamelCase : List[Any] = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
_lowerCamelCase : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336 | 1 |
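# Sanity check (illustrative addition): the integer 5 has exactly seven
# partitions (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).
assert partition(5) == 7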
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = 384
_SCREAMING_SNAKE_CASE : int = 7
if "tiny" in model_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = 96
_SCREAMING_SNAKE_CASE : int = (2, 2, 6, 2)
_SCREAMING_SNAKE_CASE : Union[str, Any] = (3, 6, 12, 24)
elif "small" in model_name:
_SCREAMING_SNAKE_CASE : Union[str, Any] = 96
_SCREAMING_SNAKE_CASE : str = (2, 2, 18, 2)
_SCREAMING_SNAKE_CASE : Optional[int] = (3, 6, 12, 24)
elif "base" in model_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = 128
_SCREAMING_SNAKE_CASE : str = (2, 2, 18, 2)
_SCREAMING_SNAKE_CASE : Tuple = (4, 8, 16, 32)
_SCREAMING_SNAKE_CASE : Optional[int] = 12
_SCREAMING_SNAKE_CASE : Union[str, Any] = 512
elif "large" in model_name:
_SCREAMING_SNAKE_CASE : Tuple = 192
_SCREAMING_SNAKE_CASE : str = (2, 2, 18, 2)
_SCREAMING_SNAKE_CASE : int = (6, 12, 24, 48)
_SCREAMING_SNAKE_CASE : str = 12
_SCREAMING_SNAKE_CASE : Tuple = 768
# set label information
_SCREAMING_SNAKE_CASE : int = 150
_SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
_SCREAMING_SNAKE_CASE : Dict = "ade20k-id2label.json"
_SCREAMING_SNAKE_CASE : Tuple = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
    _SCREAMING_SNAKE_CASE : Any = {int(k ): v for k, v in id2label.items()}
    _SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in id2label.items()}
_SCREAMING_SNAKE_CASE : List[Any] = SwinConfig(
embed_dim=__lowerCamelCase, depths=__lowerCamelCase, num_heads=__lowerCamelCase, window_size=__lowerCamelCase, out_features=["stage1", "stage2", "stage3", "stage4"], )
_SCREAMING_SNAKE_CASE : Tuple = UperNetConfig(
        backbone_config=__lowerCamelCase, auxiliary_in_channels=__lowerCamelCase, num_labels=__lowerCamelCase, id2label=__lowerCamelCase, label2id=__lowerCamelCase, )
return config
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = dct.pop(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = val
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_SCREAMING_SNAKE_CASE : Any = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
_SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE : Any = in_proj_weight[:dim, :]
_SCREAMING_SNAKE_CASE : str = in_proj_bias[: dim]
_SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE : Dict = in_proj_weight[
-dim :, :
]
_SCREAMING_SNAKE_CASE : Any = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = x.shape
_SCREAMING_SNAKE_CASE : Dict = x.reshape(__lowerCamelCase, 4, in_channel // 4 )
_SCREAMING_SNAKE_CASE : List[str] = x[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(__lowerCamelCase, __lowerCamelCase )
return x
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = x.shape
_SCREAMING_SNAKE_CASE : int = x.reshape(__lowerCamelCase, in_channel // 4, 4 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = x[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(__lowerCamelCase, __lowerCamelCase )
return x
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
_SCREAMING_SNAKE_CASE : str = x.reshape(4, in_channel // 4 )
_SCREAMING_SNAKE_CASE : Optional[int] = x[[0, 2, 1, 3], :].transpose(0, 1 ).reshape(__lowerCamelCase )
return x
def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
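# Quick sanity sketch (not part of the conversion script): the "correct" and
# "reverse" helpers above should be mutual inverses, since the [0, 2, 1, 3]
# permutation is an involution. Shapes here are illustrative assumptions,
# chosen so in_channel is divisible by 4.
#
#   import torch
#   w = torch.randn(8, 16)
#   assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(w)), w)
#   v = torch.randn(16)
#   assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(v)), v)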
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
if model_name == "upernet-swin-tiny":
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], __lowerCamelCase, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 325 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
'''simple docstring'''
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
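# Standalone sketch of the padding-mask logic above (illustrative values,
# assuming pad_token_id=1): every position that is not the pad token gets a 1.
#
#   import tensorflow as tf
#   ids = tf.constant([[5, 7, 1, 1]])
#   mask = tf.cast(tf.math.not_equal(ids, 1), tf.int8)  # -> [[1, 1, 0, 0]]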
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmall90MIntegrationTests(unittest.TestCase):
'''simple docstring'''
    src_text = [
        'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
        ' i\'m going to throw up.\nand why is that?'
    ]
    model_name = 'facebook/blenderbot_small-90M'
    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
) | 325 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''swin2sr'''
    attribute_map = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
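# Usage sketch (hedged: assumes a transformers version that ships Swin2SR):
#
#   from transformers import Swin2SRConfig, Swin2SRModel
#   config = Swin2SRConfig(upscale=4)  # 4x super-resolution instead of the default 2x
#   model = Swin2SRModel(config)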
| 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
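    # The expected ids above follow the byte-level scheme visible in this test:
    # [CLS]=4, [SEP]=5, and each UTF-8 byte is shifted by 6 ('U' = 85 -> 91,
    # '€' = 0xE2 0x82 0xAC -> 232, 136, 178). A minimal sketch of that mapping
    # (an inference from the fixtures, not the tokenizer's actual implementation):
    #
    #   def byte_ids(text: str) -> list:
    #       return [4] + [b + 6 for b in text.encode("utf-8")] + [5]
    #
    #   assert byte_ids("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]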
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')
    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_tokenizer_slow_store_full_signature(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 34 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['audio_values', 'audio_mask']

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
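    # Range check for the normalization above (a worked note, not extra behaviour):
    # with db_range=80.0 the dB spectrogram lies in [max_dB - 80, max_dB]; subtracting
    # 20, dividing by 40 and clipping to [-2, 0] before adding 1 maps it into [-1, 1].
    # For example, a bin 60 dB below the peak gives (-60 - 20) / 40 = -2, which becomes
    # -1 after the +1, while the peak itself clips to 0 and becomes 1.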
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, sampling_rate: Optional[int] = None, resample: bool = False, mask_audio: bool = False, **kwargs, ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
f" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 362 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)
    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)
    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
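    # Minimal sketch of the registration round-trip exercised above (assumes any
    # PretrainedConfig subclass with a unique `model_type`, as CustomConfig is here):
    #
    #   AutoConfig.register("custom", CustomConfig)   # map "custom" -> CustomConfig
    #   config = AutoConfig.for_model("custom")       # now resolvable by model type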
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")
    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
            self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")
    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 37 | 0 |
def sylvester(number: int) -> int:
    """Calculates the n-th number in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
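# Worked check of the recurrence above: with a(1) = 2, the code computes
# a(n) = (a(n-1) - 1) * a(n-1) + 1 = a(n-1)^2 - a(n-1) + 1, giving the sequence
# 2, 3, 7, 43, 1807, ... (each term is one more than the product of all
# previous terms).
#
#   assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]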
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 209 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
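    # The two loop tests above share the standard diffusion sampling pattern:
    # predict the noise residual with the model, then let the scheduler step
    # back one timestep. A condensed sketch of one iteration (names as above):
    #
    #   residual = model(sample, t)
    #   sample = scheduler.step(residual, t, sample, generator=generator).prev_sample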
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 209 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = '''xlm'''
    attribute_map = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
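# Usage sketch (hedged: the standard PretrainedConfig workflow, not specific to
# this file):
#
#   from transformers import XLMConfig, XLMModel
#   config = XLMConfig(n_layers=6, emb_dim=1024)  # override selected defaults
#   model = XLMModel(config)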
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 276 |
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray | None = None, ) -> np.ndarray:
    """Calculates the Schur complement of a symmetric block matrix [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
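# Worked note on the identity the tests below rely on: for the block matrix
# M = [[A, B], [B.T, C]] with A invertible, the Schur complement
# S = C - B.T @ A^-1 @ B satisfies det(M) = det(A) * det(S).
# A tiny standalone check with assumed values:
#
#   a = np.array([[2.0, 0.0], [0.0, 2.0]])
#   b = np.array([[1.0], [0.0]])
#   c = np.array([[3.0]])
#   s = schur_complement(a, b, c)            # 3 - 1/2 = 2.5
#   m = np.block([[a, b], [b.T, c]])
#   assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))  # 10 == 4 * 2.5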
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 276 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f'''{class_data_dir}/images''', exist_ok=True)
    if len(list(Path(f'''{class_data_dir}/images''').iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f'''{class_data_dir}/caption.txt''', "w") as f1, open(f'''{class_data_dir}/urls.txt''', "w") as f2, open(
        f'''{class_data_dir}/images.txt''', "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f'''{class_data_dir}/images/{total}.jpg''', "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f'''{class_data_dir}/images/{total}.jpg''' + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
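# Example invocation (arguments as defined in parse_args above; the script file
# name and the prompt are illustrative placeholders):
#
#   python retrieve_class_images.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir ./class_data/dog \
#       --num_class_images 200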
if __name__ == "__main__":
__lowerCAmelCase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_trajectory_transformer'''] = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
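    # How the lazy pattern above behaves (a usage note, not extra functionality):
    # importing the package itself is cheap, and the heavy torch-backed module is
    # only loaded when one of the names listed in _import_structure is first
    # accessed, e.g. (exact import path depends on the installed transformers version):
    #
    #   from transformers.models.trajectory_transformer import (
    #       TrajectoryTransformerModel,  # triggers the actual module import
    #   )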
| 254 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
snake_case__ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = NllbTokenizer
_lowerCAmelCase = []
_lowerCAmelCase = []
def __init__( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : str="<s>" , _lowerCamelCase : Optional[Any]="</s>" , _lowerCamelCase : str="</s>" , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : Optional[int]="<unk>" , _lowerCamelCase : Tuple="<pad>" , _lowerCamelCase : List[str]="<mask>" , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Union[str, Any]=False , **_lowerCamelCase : Tuple , ):
"""simple docstring"""
A_ : Dict = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
A_ : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=_lowerCamelCase , tokenizer_file=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , legacy_behaviour=_lowerCamelCase , **_lowerCamelCase , )
A_ : str = vocab_file
A_ : int = False if not self.vocab_file else True
A_ : Any = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
A_ : str = {
lang_code: self.convert_tokens_to_ids(_lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ : Any = src_lang if src_lang is not None else '''eng_Latn'''
A_ : Tuple = self.convert_tokens_to_ids(self._src_lang )
A_ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _a ( self : int , _lowerCamelCase : str ):
"""simple docstring"""
A_ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def _a ( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _a ( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def _a ( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] , _lowerCamelCase : Optional[str] , **_lowerCamelCase : str ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
A_ : List[Any] = src_lang
A_ : List[Any] = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
A_ : str = self.convert_tokens_to_ids(_lowerCamelCase )
A_ : Tuple = tgt_lang_id
return inputs
def _a ( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str = "eng_Latn" , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : str = "fra_Latn" , **_lowerCamelCase : List[Any] , ):
"""simple docstring"""
A_ : Dict = src_lang
A_ : Any = tgt_lang
return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def _a ( self : List[str] ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self : Optional[int] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self : Any , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.convert_tokens_to_ids(_lowerCamelCase )
if self.legacy_behaviour:
A_ : Tuple = []
A_ : int = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Any = [self.cur_lang_code]
A_ : str = [self.eos_token_id]
A_ : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : str = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self : Union[str, Any] , _lowerCamelCase : str ):
"""simple docstring"""
A_ : str = self.convert_tokens_to_ids(_lowerCamelCase )
if self.legacy_behaviour:
A_ : Tuple = []
A_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
else:
A_ : Dict = [self.cur_lang_code]
A_ : Tuple = [self.eos_token_id]
A_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
A_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
A_ : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
A_ : Any = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
return (out_vocab_file,)
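# A minimal usage sketch for the tokenizer above, assuming the real transformers
# class name NllbTokenizerFast and the facebook/nllb-200-distilled-600M checkpoint
# (the identifiers inside the snippet itself are machine-renamed):
from transformers import NllbTokenizerFast

# src_lang / tgt_lang drive the prefix and suffix special tokens configured above
tokenizer = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello world", return_tensors="pt")
# the target language id is resolved the same way the lang-code table above is built
fra_id = tokenizer.convert_tokens_to_ids("fra_Latn")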
| 364 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = 'table-transformer'
_lowerCAmelCase = ['past_key_values']
_lowerCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : str = backbone_config.get('''model_type''' )
A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
A_ : List[str] = config_class.from_dict(_lowerCamelCase )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
A_ : Optional[Any] = use_timm_backbone
A_ : Optional[int] = backbone_config
A_ : Optional[Any] = num_channels
A_ : Dict = num_queries
A_ : str = d_model
A_ : List[str] = encoder_ffn_dim
A_ : int = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[str] = decoder_ffn_dim
A_ : Any = decoder_layers
A_ : List[str] = decoder_attention_heads
A_ : Tuple = dropout
A_ : Optional[Any] = attention_dropout
A_ : Any = activation_dropout
A_ : List[Any] = activation_function
A_ : Dict = init_std
A_ : Any = init_xavier_std
A_ : List[Any] = encoder_layerdrop
A_ : int = decoder_layerdrop
A_ : Any = encoder_layers
A_ : List[str] = auxiliary_loss
A_ : List[Any] = position_embedding_type
A_ : Optional[Any] = backbone
A_ : Tuple = use_pretrained_backbone
A_ : List[Any] = dilation
# Hungarian matcher
A_ : List[str] = class_cost
A_ : str = bbox_cost
A_ : Union[str, Any] = giou_cost
# Loss coefficients
A_ : Any = mask_loss_coefficient
A_ : Optional[int] = dice_loss_coefficient
A_ : Dict = bbox_loss_coefficient
A_ : int = giou_loss_coefficient
A_ : int = eos_coefficient
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _a ( self : Any ):
"""simple docstring"""
return self.d_model
class UpperCamelCase_ (a__ ):
"""simple docstring"""
_lowerCAmelCase = version.parse('1.11' )
@property
def _a ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 1E-5
@property
def _a ( self : str ):
"""simple docstring"""
return 12
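# The attribute_map declared at the top of the class is what lets the generic
# names resolve to the DETR-style fields; a small hedged check, assuming the
# real transformers class name TableTransformerConfig:
from transformers import TableTransformerConfig

config = TableTransformerConfig()
assert config.hidden_size == config.d_model  # alias via attribute_map
assert config.num_attention_heads == config.encoder_attention_heads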
| 4 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = tempfile.mkdtemp()
UpperCamelCase_ = BlipImageProcessor()
UpperCamelCase_ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
UpperCamelCase_ = BlipaProcessor(_lowercase , _lowercase )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).tokenizer
def _lowerCamelCase ( self , **snake_case__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def _lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase_ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCamelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
UpperCamelCase_ = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = image_processor(_lowercase , return_tensors="np" )
UpperCamelCase_ = processor(images=_lowercase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = processor(text=_lowercase )
UpperCamelCase_ = tokenizer(_lowercase , return_token_type_ids=_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
UpperCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_ = processor.batch_decode(_lowercase )
UpperCamelCase_ = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = BlipaProcessor(tokenizer=_lowercase , image_processor=_lowercase )
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_lowercase , images=_lowercase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
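# A hedged usage sketch of the processor exercised by these tests, using the real
# transformers class name Blip2Processor and the usual BLIP-2 checkpoint (both are
# assumptions; the dump renders the class as BlipaProcessor):
from PIL import Image
from transformers import Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
image = Image.open("demo.jpg").convert("RGB")  # hypothetical local file
inputs = processor(images=image, text="a photo of", return_tensors="pt")
# yields pixel_values, input_ids and attention_mask, as the tests above check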
| 128 | from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( lowerCamelCase_ ):
def __init__( self : Dict , _lowercase : CLIPSegForImageSegmentation , _lowercase : CLIPSegProcessor , _lowercase : AutoencoderKL , _lowercase : CLIPTextModel , _lowercase : CLIPTokenizer , _lowercase : UNetaDConditionModel , _lowercase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowercase : StableDiffusionSafetyChecker , _lowercase : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE__ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , _lowercase , standard_warn=_lowercase )
SCREAMING_SNAKE_CASE__ = dict(scheduler.config )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = FrozenDict(_lowercase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE__ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , _lowercase , standard_warn=_lowercase )
SCREAMING_SNAKE_CASE__ = dict(scheduler.config )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FrozenDict(_lowercase )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=_lowercase , segmentation_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , )
def __a ( self : List[Any] , _lowercase : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def __a ( self : Any ):
"""simple docstring"""
self.enable_attention_slicing(_lowercase )
def __a ( self : Optional[int] ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __a ( self : Optional[int] ):
"""simple docstring"""
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , _lowercase : Union[str, List[str]] , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] , _lowercase : str , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , **_lowercase : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
SCREAMING_SNAKE_CASE__ = self.segmentation_model(**_lowercase )
SCREAMING_SNAKE_CASE__ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(_lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
SCREAMING_SNAKE_CASE__ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , )
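# A hedged end-to-end sketch of how this segmentation-plus-inpainting pipeline is
# typically wired up, assuming the CIDAS/clipseg-rd64-refined segmentation
# checkpoint, a Stable Diffusion inpainting checkpoint, and the community
# pipeline id "text_inpainting" (all names outside this file are assumptions):
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
from diffusers import DiffusionPipeline

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=model,
    segmentation_processor=processor,
)
image = Image.open("dog_on_bench.png").convert("RGB").resize((512, 512))  # hypothetical file
# `text` selects the region to replace, `prompt` says what to paint there
result = pipe(image=image, text="a dog", prompt="a cat sitting on a bench").images[0]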
| 219 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
@slow
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 358 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    '''NOR returns 1 only when both inputs are 0.'''
    return int(input_1 == input_2 == 0)
def main() -> None:
    '''Print the NOR truth table.'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
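# NOR is functionally complete, so the other basic gates can be built from
# nor_gate alone; a quick sketch:
def not_gate(a: int) -> int:
    return nor_gate(a, a)

def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))

def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))

assert not_gate(0) == 1 and or_gate(0, 1) == 1 and and_gate(1, 1) == 1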
| 91 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Optional[Any] = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 165 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A__ : Union[str, Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with a distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase_ )
class __snake_case ( UpperCamelCase_ ):
_a = '''rag'''
_a = True
def __init__( self : str , A_ : List[Any]=None , A_ : str=True , A_ : Tuple=None , A_ : Union[str, Any]=None , A_ : List[str]=None , A_ : List[str]=None , A_ : List[Any]=None , A_ : Union[str, Any]=" / " , A_ : Tuple=" // " , A_ : Any=5 , A_ : Optional[Any]=3_0_0 , A_ : Tuple=7_6_8 , A_ : Union[str, Any]=8 , A_ : Dict="wiki_dpr" , A_ : Optional[Any]="train" , A_ : Dict="compressed" , A_ : Optional[int]=None , A_ : List[str]=None , A_ : str=False , A_ : Dict=False , A_ : Dict=0.0 , A_ : List[str]=True , A_ : List[str]=False , A_ : List[Any]=False , A_ : Any=False , A_ : Optional[int]=True , A_ : int=None , **A_ : List[str] , ):
super().__init__(
bos_token_id=A_ , pad_token_id=A_ , eos_token_id=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , is_encoder_decoder=A_ , prefix=A_ , vocab_size=A_ , **A_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowerCAmelCase_ : List[str] = kwargs.pop('''question_encoder''')
lowerCAmelCase_ : Tuple = question_encoder_config.pop('''model_type''')
lowerCAmelCase_ : Tuple = kwargs.pop('''generator''')
lowerCAmelCase_ : Dict = decoder_config.pop('''model_type''')
from ..auto.configuration_auto import AutoConfig
lowerCAmelCase_ : Union[str, Any] = AutoConfig.for_model(A_ , **A_)
lowerCAmelCase_ : int = AutoConfig.for_model(A_ , **A_)
lowerCAmelCase_ : List[Any] = reduce_loss
lowerCAmelCase_ : Optional[Any] = label_smoothing
lowerCAmelCase_ : Union[str, Any] = exclude_bos_score
lowerCAmelCase_ : List[Any] = do_marginalize
lowerCAmelCase_ : int = title_sep
lowerCAmelCase_ : Optional[int] = doc_sep
lowerCAmelCase_ : List[str] = n_docs
lowerCAmelCase_ : int = max_combined_length
lowerCAmelCase_ : Union[str, Any] = dataset
lowerCAmelCase_ : int = dataset_split
lowerCAmelCase_ : Dict = index_name
lowerCAmelCase_ : Union[str, Any] = retrieval_vector_size
lowerCAmelCase_ : Optional[Any] = retrieval_batch_size
lowerCAmelCase_ : List[str] = passages_path
lowerCAmelCase_ : Any = index_path
lowerCAmelCase_ : int = use_dummy_dataset
lowerCAmelCase_ : Tuple = output_retrieved
lowerCAmelCase_ : List[Any] = do_deduplication
lowerCAmelCase_ : Union[str, Any] = use_cache
if self.forced_eos_token_id is None:
lowerCAmelCase_ : List[Any] = getattr(self.generator , '''forced_eos_token_id''' , A_)
@classmethod
def UpperCAmelCase__ ( cls : str , A_ : PretrainedConfig , A_ : PretrainedConfig , **A_ : Any):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **A_)
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : str = copy.deepcopy(self.__dict__)
lowerCAmelCase_ : Tuple = self.question_encoder.to_dict()
lowerCAmelCase_ : Dict = self.generator.to_dict()
lowerCAmelCase_ : str = self.__class__.model_type
return output
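# A hedged sketch of composing this config from two sub-configs via the
# classmethod defined above, assuming the real transformers names RagConfig,
# DPRConfig and BartConfig:
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig.from_question_encoder_generator_configs(
    DPRConfig(), BartConfig(), n_docs=5
)
assert rag_config.generator.model_type == "bart"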
| 103 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    """Pick a random element of the list to use as pivot."""
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th largest element of a list of distinct integers."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(big) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(big) < k - 1:
        return kth_number(small, k - len(big) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(big, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
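# A quick check of kth_number on distinct values (the partition drops duplicates
# of the pivot, so inputs are assumed distinct); the 3rd-largest element here is 5:
assert kth_number([2, 5, 7, 1, 9, 4], 3) == 5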
| 367 |
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
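# The classic worked example for hamming_distance: "karolin" and "kathrin"
# differ at exactly three positions:
assert hamming_distance("karolin", "kathrin") == 3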
| 289 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class lowercase ( UpperCamelCase__ ):
_a = ["audio_values", "audio_mask"]
def __init__( self , _a=2048 , _a=1 , _a=[16, 16] , _a=128 , _a=4_4100 , _a=86 , _a=2048 , _a=0.0 , **_a , ) -> List[Any]:
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , )
_A : Any = spectrogram_length
_A : Dict = num_channels
_A : Optional[Any] = patch_size
_A : str = feature_size // self.patch_size[1]
_A : List[Any] = n_fft
_A : Optional[Any] = sampling_rate // hop_length_to_sampling_rate
_A : List[str] = sampling_rate
_A : Union[str, Any] = padding_value
_A : List[str] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T
def a__ ( self , _a ) -> np.ndarray:
_A : Optional[int] = spectrogram(
_a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
_A : Tuple = log_spec[:, :-1]
_A : int = log_spec - 20.0
_A : Tuple = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_A : int = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_A : Optional[Any] = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
_A : str = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A : Dict = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_A : Union[str, Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _a ):
_A : Optional[int] = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_A : Any = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_A : str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_A : List[str] = np.array(_a ).astype(np.floataa )
# convert into correct format for padding
_A : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_A : int = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_A : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(_a ) ):
_A : Union[str, Any] = audio_features[i]
_A : Optional[Any] = feature
# return as BatchFeature
if return_attention_mask:
_A : Optional[Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_A : Any = {"""audio_values""": padded_audio_features}
_A : int = BatchFeature(data=_a , tensor_type=_a )
return encoded_inputs
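# A hedged arithmetic sketch of the audio-mask length computed above, plugging
# in this extractor's defaults (feature_size=128, patch_size=(16, 16)):
from math import ceil

feature_size, patch_size = 128, (16, 16)
freq_len = feature_size // patch_size[1]  # 8 patches along the frequency axis
time_frames = 2000                        # frames in one log-mel spectrogram
num_patches = ceil(time_frames / patch_size[0]) * freq_len
assert num_patches == 1000                # 125 time patches * 8 frequency patches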
| 26 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowerCAmelCase_ ( snake_case_ ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_A : str = k.replace(snake_case_,snake_case_ )
if k.startswith("""encoder""" ):
_A : Optional[Any] = k.replace(""".attn""",""".self_attn""" )
_A : Dict = k.replace("""norm1""","""self_attn_layer_norm""" )
_A : Optional[Any] = k.replace("""norm2""","""final_layer_norm""" )
elif k.startswith("""decoder""" ):
_A : str = k.replace("""norm1""","""self_attn_layer_norm""" )
_A : Any = k.replace("""norm2""","""encoder_attn_layer_norm""" )
_A : Optional[int] = k.replace("""norm3""","""final_layer_norm""" )
return k
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
_A : str = sd.pop(snake_case_ )
_A : Optional[int] = k.replace("""layernorm_embedding""","""layer_norm""" )
assert new_k not in sd
_A : Optional[int] = v
_snake_case = ["START"]
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
_A : List[Any] = model["""model"""]
_A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ )
_A : List[str] = BlenderbotForConditionalGeneration(snake_case_ )
_A : Tuple = m.model.state_dict().keys()
_A : Any = []
_A : Dict = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_A : Optional[int] = rename_state_dict_key(snake_case_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_A : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(snake_case_ )
m.model.load_state_dict(snake_case_,strict=snake_case_ )
m.half()
m.save_pretrained(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_snake_case = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
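# How the pattern table cascades for one typical key, traced by hand from
# PATTERNS and the encoder branch above (the walkthrough assumes the function
# keeps the name rename_state_dict_key that the conversion loop calls it by):
# "encoder.attention.q_lin.weight"
#   -> "encoder.attn.q_lin.weight"        (attention -> attn)
#   -> "encoder.attn.q_proj.weight"       (q_lin -> q_proj)
#   -> "encoder.self_attn.q_proj.weight"  (.attn -> .self_attn for encoder keys)
assert rename_state_dict_key("encoder.attention.q_lin.weight") == "encoder.self_attn.q_proj.weight"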
| 26 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : List[Any] = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ : Any = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
SCREAMING_SNAKE_CASE_ : Optional[int] = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowercase_ : Tuple):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase).tokenizer
def _SCREAMING_SNAKE_CASE ( self : int , **lowercase_ : str):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase).image_processor
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowercase_ : Any):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase).qformer_tokenizer
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
SCREAMING_SNAKE_CASE_ : Dict = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0)
SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __UpperCAmelCase)
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(__UpperCAmelCase , return_tensors='''np''')
SCREAMING_SNAKE_CASE_ : Any = processor(images=__UpperCAmelCase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : Any = '''lower newer'''
SCREAMING_SNAKE_CASE_ : Optional[int] = processor(text=__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : Optional[Any] = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase)
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key])
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key])
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : Tuple = '''lower newer'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.batch_decode(__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ : int = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase)
SCREAMING_SNAKE_CASE_ : Optional[int] = '''lower newer'''
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase)
self.assertListEqual(
list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 354 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed in m/s for a gas at `temperature` (K) with `molar_mass` (kg/mol)."""
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''' )
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
temperature = 300  # K
molar_mass = 0.028  # kg/mol for N2
vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
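# Sanity check: for nitrogen at 300 K the formula gives
# sqrt(3 * 8.3144598 * 300 / 0.028), roughly 517 m/s, which matches tabulated
# room-temperature values:
assert abs(rms_speed_of_molecule(300, 0.028) - 516.96) < 0.5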
| 318 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( _lowerCamelCase):
_lowerCAmelCase : List[Any] = """gptj"""
_lowerCAmelCase : Optional[Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , lowercase_ : List[Any]=50400 , lowercase_ : Tuple=2048 , lowercase_ : Optional[int]=4096 , lowercase_ : Tuple=28 , lowercase_ : List[Any]=16 , lowercase_ : Union[str, Any]=64 , lowercase_ : str=None , lowercase_ : Tuple="gelu_new" , lowercase_ : Optional[int]=0.0 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : List[Any]=1E-5 , lowercase_ : Tuple=0.02 , lowercase_ : Tuple=True , lowercase_ : List[Any]=50256 , lowercase_ : str=50256 , lowercase_ : Union[str, Any]=False , **lowercase_ : List[str] , ):
snake_case_ : Optional[int] = vocab_size
snake_case_ : int = n_positions
snake_case_ : Optional[Any] = n_embd
snake_case_ : List[Any] = n_layer
snake_case_ : Dict = n_head
snake_case_ : Dict = n_inner
snake_case_ : str = rotary_dim
snake_case_ : Any = activation_function
snake_case_ : Optional[Any] = resid_pdrop
snake_case_ : List[Any] = embd_pdrop
snake_case_ : Tuple = attn_pdrop
snake_case_ : Optional[Any] = layer_norm_epsilon
snake_case_ : Dict = initializer_range
snake_case_ : Optional[Any] = use_cache
snake_case_ : Union[str, Any] = bos_token_id
snake_case_ : Optional[int] = eos_token_id
super().__init__(
bos_token_id=a_ , eos_token_id=a_ , tie_word_embeddings=a_ , **a_ )
class _UpperCAmelCase ( _lowerCamelCase):
def __init__( self : List[str] , lowercase_ : Tuple , lowercase_ : str = "default" , lowercase_ : Any = None , lowercase_ : List[Any] = False , ):
super().__init__(a_ , task=a_ , patching_specs=a_ , use_past=a_ )
if not getattr(self._config , '''pad_token_id''' , a_ ):
# TODO: how to do that better?
snake_case_ : List[str] = 0
@property
def _snake_case ( self : Dict ):
snake_case_ : Any = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(a_ , direction='''inputs''' )
snake_case_ : str = {0: """batch""", 1: """past_sequence + sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def _snake_case ( self : List[str] ):
return self._config.n_layer
@property
def _snake_case ( self : int ):
return self._config.n_head
def _snake_case ( self : int , lowercase_ : str , lowercase_ : str = -1 , lowercase_ : Union[str, Any] = -1 , lowercase_ : Dict = False , lowercase_ : List[Any] = None , ):
snake_case_ : Dict = super(a_ , self ).generate_dummy_inputs(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
# We need to order the input in the way they appears in the forward()
snake_case_ : int = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case_ : Any = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case_ : Optional[int] = seqlen + 2
snake_case_ : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case_ : Optional[int] = [
(torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers )
]
snake_case_ : int = common_inputs["""attention_mask"""]
if self.use_past:
snake_case_ : int = ordered_inputs["""attention_mask"""].dtype
snake_case_ : List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self : Dict ):
return 13
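# A small arithmetic sketch of the dummy past_key_values shape assembled above,
# plugging in the GPT-J-6B defaults (n_head=16, n_embd=4096) with batch=2 and
# seqlen=8:
batch, seqlen = 2, 8
n_head, n_embd = 16, 4096
past_key_values_length = seqlen + 2
past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)
assert past_shape == (2, 16, 10, 256)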
| 264 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
_UpperCAmelCase : Optional[int] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !"
_UpperCAmelCase : Dict = model(a_ )["""last_hidden_state"""]
_UpperCAmelCase : Dict = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape ,a_ )
# compare the actual values for a slice.
_UpperCAmelCase : Tuple = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 215 | 0 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('''The following activities are selected:''')
    # The first activity is always selected
    i = 0
    print(i , end=''',''')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''')
            i = j
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
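# A variant that returns the selected indices instead of printing them; like the
# function above it assumes activities are pre-sorted by finish time:
def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always selected
    i = 0
    for j in range(1, len(finish)):
        if start[j] >= finish[i]:
            selected.append(j)
            i = j
    return selected

assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]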
| 368 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1)
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size)
    )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1)
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias)
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias)
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias)
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias)
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings)
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias)
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias)
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
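
# Example invocation (script name and paths are placeholders):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin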
| 172 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
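
# With this pattern, importing the package stays cheap: the torch-backed
# submodule is only imported when one of the names in _import_structure is
# first accessed, because _LazyModule replaces this module in sys.modules.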
| 91 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
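#
# Hypothetical example: if src/foo.py and docs/bar.md were modified on the
# branch, `python ./utils/get_modified_files.py src` prints "src/foo.py"
# (only tracked .py files under the listed top-level dirs are reported)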
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 189 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_ddim(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
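
# A minimal sketch of typical standalone usage outside the tests (assumes a
# CUDA device; checkpoint name taken from the tests above):
#
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-base", safety_checker=None
#   )
#   pipe = pipe.to("cuda")
#   image = pipe("a photo of the dolomites").images[0]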
| 231 |
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (Flax parameter tree: block -> layer -> {"0": attention, "1": mlp})
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (Flax parameter tree: block -> layer -> {"0": self-attn, "1": cross-attn, "2": mlp})
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 231 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        # Fixed two-element parameters; only the first element is used in forward.
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
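
# A minimal sketch of how these helpers are typically wired together
# (assumed usage, not part of this module):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model = RegressionModel()
#   train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)
#   model, train_dl, eval_dl = accelerator.prepare(model, train_dl, eval_dl)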
| 16 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and  Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'

_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'

_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
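
# Example (mirroring the usage shown in the docstring above):
#
#   meteor = datasets.load_metric("meteor")
#   predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
#   references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
#   results = meteor.compute(predictions=predictions, references=references)
#   round(results["meteor"], 4)  # 0.6944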
| 16 | 1 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    # Subtractive pairs (e.g. IV, IX) are handled by subtracting any symbol
    # that precedes a larger one.
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    # Builds the minimal-length roman numeral for num, digit group by digit group.
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
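
# Example: generate_roman_numerals(1990) returns "MCMXC" (the minimal form),
# and parse_roman_numerals("MCMXC") returns 1990, so the two functions
# round-trip for minimal-form numerals.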
def _lowerCAmelCase ( _UpperCamelCase : str = "/p089_roman.txt" ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
with open(os.path.dirname(_UpperCamelCase ) + roman_numerals_filename ) as filea:
_SCREAMING_SNAKE_CASE =filea.readlines()
for line in lines:
_SCREAMING_SNAKE_CASE =line.strip()
_SCREAMING_SNAKE_CASE =parse_roman_numerals(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =generate_roman_numerals(_UpperCamelCase )
savings += len(_UpperCamelCase ) - len(_UpperCamelCase )
return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""")
            in_proj_bias = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
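
# Worked example on a hypothetical 8-element tensor: correct_unfold_norm_order
# maps [0, 1, 2, 3, 4, 5, 6, 7] to [0, 4, 2, 6, 1, 5, 3, 7], and
# reverse_correct_unfold_norm_order maps that back to the original order,
# undoing the channel reordering of the patch-merging (downsample) layers.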
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
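
# Example invocation (script name and output path are placeholders):
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub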
| 325 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(r"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(r"""^\s*else:""")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
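
# Illustrative example of the comparison (keys and object names are made up):
#   analyze_results({"none": ["A", "B"]}, {"none": ["A"]})
# would report:
#   Differences for base imports:
#     B in _import_structure but not in TYPE_HINT.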
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 |
"""simple docstring"""
import copy
import os
import cv2 as cva  # OpenCV
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 2_56
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 2_56, [0, 2_56], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 2_56, [0, 2_56])

    def show_image(self):
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(50_00)
        cva.destroyAllWindows()
if __name__ == "__main__":
__UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
__UpperCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
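

# Usage sketch via the high-level API (model name illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]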
| 173 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
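

# Example values (prime factors counted with multiplicity):
#   liouville_lambda(10) == 1   # 10 = 2 * 5, two factors
#   liouville_lambda(12) == -1  # 12 = 2 * 2 * 3, three factors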
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 | 0 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
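

# Sanity check from the Project Euler 29 statement: for 2 <= a, b <= 5
# there are exactly 15 distinct terms, i.e. solution(5) == 15.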
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip()))) | 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 276 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 276 | 1 |
'''simple docstring'''
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
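

# Worked examples: partition(3) == 3 and partition(5) == 7, matching the
# number of integer partitions p(3) and p(5).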
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
        n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
        n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 357 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
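
    # Flow summary: each worker sends its query vectors to rank 0 (gather),
    # rank 0 queries the index once over the concatenated batch, then sends
    # each worker its slice of doc ids/embeddings back (scatter), so only the
    # main process has to hold the (large) index in memory.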
| 242 | 0 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
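    # Expected: {0, 1, 2, 4}. Note the heuristic is greedy and not guaranteed
    # optimal; {0, 2, 3} is a smaller cover for this graph.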
| 177 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
    ][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
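

# For get_demo_graph(0) the bridges are (3, 4), (2, 3) and (2, 5); the exact
# list order depends on the DFS traversal (here: [(3, 4), (2, 3), (2, 5)]).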
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 361 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port=None):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
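

# Usage sketch for patch_environment (values illustrative):
#   with patch_environment(master_addr="127.0.0.1", master_port="29501"):
#       ...  # os.environ["MASTER_ADDR"] / ["MASTER_PORT"] are set here
#   # both variables are removed again when the block exits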
| 162 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self):
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self):
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant(self):
        filenames = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 91 | 0 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_00_00_00) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
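
# This is Project Euler problem 86; for the default limit of 1,000,000 the
# widely reported answer is M = 1818 (the call takes a noticeable time to run).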
if __name__ == "__main__":
print(F"{solution() = }")
| 369 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
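# This capacity matrix is the classic textbook flow network; the expected
# maximum flow from node 0 to node 5 is 23.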
| 40 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,"
                    " no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json"
                    " dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
def lowerCamelCase_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ) -> List[str]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2') , exist_ok=lowercase)
a__: Optional[int] = {}
a__: Any = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
a__: int = self._load_voice_preset(lowercase)
a__: Optional[int] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , f'{prompt_key}_{key}') , voice_preset[key] , allow_pickle=lowercase , )
a__: str = os.path.join(lowercase , f'{prompt_key}_{key}.npy')
a__: Optional[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase) , 'w') as fp:
json.dump(lowercase , lowercase)
super().save_pretrained(lowercase , lowercase , **lowercase)
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does'
                    f" not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the"
                    f" {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=2_56,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
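

# Usage sketch (checkpoint and preset names are illustrative):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")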
| 290 | """simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListOrTuple = Union[List[T], Tuple[T, ...]]
NestedStructure = Union[T, List[T], Dict[str, T]]
PathLikeType = Union[str, bytes, os.PathLike]
| 290 | 1 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))
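

# Quick examples: sum_of_digits(12345) == 15 and sum_of_digits(-123) == 6;
# all three implementations agree on these values.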
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_a , _a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 36 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
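
# With the lazy module in place, `from transformers.models.gpt_neox import GPTNeoXModel`
# only imports the heavy torch-backed submodule on first attribute access.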
| 36 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class GenerationConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = GenerationConfig(
do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
| 330 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
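# A condensed sketch of just the tracking-specific calls demonstrated in full
# below (names follow this script; the run name and logged values are illustrative):
#     accelerator = Accelerator(log_with="all", project_dir=args.project_dir)
#     accelerator.init_trackers("mrpc_tracking_run", config)
#     accelerator.log({"train_loss": total_loss.item() / len(train_dataloader)}, step=epoch)
#     accelerator.end_training()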
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def A_ ( snake_case : Accelerator , snake_case : int = 16 ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__UpperCamelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case , max_length=snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCamelCase = datasets.map(
snake_case , batched=snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
__UpperCamelCase = 8
else:
__UpperCamelCase = None
return tokenizer.pad(
snake_case , padding='''longest''' , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors='''pt''' , )
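        # Illustration (hypothetical batch): with pad_to_multiple_of=8, a batch whose
        # longest sequence is 61 tokens is padded to 64, so every dimension stays a
        # multiple of 8, which mixed-precision (Tensor Core) kernels prefer.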
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
__UpperCamelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ : Union[str, Any] = mocked_dataloaders # noqa: F811
def A_ ( snake_case : List[str] , snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case ) == "1":
__UpperCamelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
__UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['''lr''']
__UpperCamelCase = int(config['''num_epochs'''] )
__UpperCamelCase = int(config['''seed'''] )
__UpperCamelCase = int(config['''batch_size'''] )
set_seed(snake_case )
__UpperCamelCase , __UpperCamelCase = get_dataloaders(snake_case , snake_case )
__UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
__UpperCamelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE
__UpperCamelCase = MAX_GPU_BATCH_SIZE
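    # e.g. (hypothetical values) batch_size=64 with MAX_GPU_BATCH_SIZE=16 gives
    # gradient_accumulation_steps=4: each forward pass sees 16 samples while the
    # optimizer still steps on an effective batch of 64.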
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
__UpperCamelCase = AdamW(params=model.parameters() , lr=snake_case )
# Instantiate scheduler
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=100 , num_training_steps=(len(snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__UpperCamelCase = os.path.split(snake_case )[-1].split('''.''' )[0]
accelerator.init_trackers(snake_case , snake_case )
# Now we train the model
for epoch in range(snake_case ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__UpperCamelCase = 0
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCamelCase = model(**snake_case )
__UpperCamelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**snake_case )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
__UpperCamelCase , __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=snake_case , references=snake_case , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , snake_case )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(snake_case ),
'''epoch''': epoch,
} , step=snake_case , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def A_ ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=snake_case , default=snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=snake_case , default='''logs''' , help='''Location where experiment tracking logs and relevant project information are stored''' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
| 328 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        '''simple docstring'''
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        '''simple docstring'''
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name)
    def get_config(self):
        '''simple docstring'''
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
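# Worked example of the warmup above (hypothetical numbers): with
# initial_learning_rate=1e-3, warmup_steps=100 and power=1.0, step 25 yields
# 1e-3 * (25 / 100) ** 1.0 = 2.5e-4; from step 100 onward decay_schedule_fn takes over.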
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
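# Minimal usage sketch (hypothetical hyper-parameters and model, not part of this module):
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01)
#     model.compile(optimizer=optimizer)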
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs):
        '''simple docstring'''
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        '''simple docstring'''
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        '''simple docstring'''
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')
    def _decay_weights_op(self, var, learning_rate, apply_state):
        '''simple docstring'''
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'], use_locking=self._use_locking)
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        '''simple docstring'''
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        '''simple docstring'''
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        '''simple docstring'''
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        '''simple docstring'''
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        '''simple docstring'''
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        '''simple docstring'''
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        '''simple docstring'''
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self):
        '''simple docstring'''
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()
    @property
    def gradients(self):
        '''simple docstring'''
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        '''simple docstring'''
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(F'''Expected {len(self._gradients)} gradients, but got {len(gradients)}''')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        '''simple docstring'''
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
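# Minimal usage sketch of the accumulator (hypothetical training loop; `tape`, `loss`,
# `model`, `optimizer` and `accumulation_steps` are assumed to exist):
#     accumulator = GradientAccumulator()
#     accumulator(tape.gradient(loss, model.trainable_variables))
#     if int(accumulator.step) % accumulation_steps == 0:
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()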
| 364 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : Optional[Any] = '▁'
_UpperCamelCase : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
_UpperCamelCase : Optional[int] = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
}
}
_UpperCamelCase : List[str] = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
_UpperCamelCase : List[str] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class snake_case ( UpperCAmelCase ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = []
__magic_name__ = []
def __init__( self : List[str] , A : Union[str, Any] , A : List[Any]="<s>" , A : Dict="</s>" , A : List[Any]="</s>" , A : Any="<s>" , A : Dict="<unk>" , A : Any="<pad>" , A : Optional[int]="<mask>" , A : str=None , A : Tuple=None , A : List[str]=None , A : Optional[Dict[str, Any]] = None , A : Any=None , A : List[Any]=False , **A : Tuple , ):
'''simple docstring'''
a : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
a : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , tokenizer_file=A , src_lang=A , tgt_lang=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=A , **A , )
a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
a : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
a : List[str] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
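        # Worked example of the offset: spm assigns "an" id 3, so its tokenizer id is
        # 3 + fairseq_offset = 4, while ids 0-3 are served by fairseq_tokens_to_ids above.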
a : Any = 1
a : int = len(self.sp_model )
a : Optional[int] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A )
}
a : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
a : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
a : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
a : Optional[int] = src_lang if src_lang is not None else 'eng_Latn'
a : List[Any] = self.lang_code_to_id[self._src_lang]
a : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ):
'''simple docstring'''
a : Dict = self.__dict__.copy()
a : int = None
a : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , A : Any ):
'''simple docstring'''
a : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a : Any = {}
a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Dict , A : str ):
'''simple docstring'''
a : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
a : Tuple = [1] * len(self.prefix_tokens )
a : int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A )) + suffix_ones
return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def lowerCamelCase__ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None ):
'''simple docstring'''
a : List[str] = [self.sep_token_id]
a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : List[str] , A : Optional[int] , A : str , A : Optional[str] , A : Optional[str] , **A : str ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
a : Any = src_lang
a : Any = self(A , add_special_tokens=A , return_tensors=A , **A )
a : Tuple = self.convert_tokens_to_ids(A )
a : Optional[Any] = tgt_lang_id
return inputs
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : Union[str, Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : Any , A : str ):
'''simple docstring'''
return self.sp_model.encode(A , out_type=A )
def lowerCamelCase__ ( self : Union[str, Any] , A : Tuple ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a : int = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
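        # e.g. an out-of-vocabulary piece makes PieceToId return 0, which must map to
        # unk_token_id rather than 0 + fairseq_offset (that would be '<pad>').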
def lowerCamelCase__ ( self : Tuple , A : List[str] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self : List[str] , A : Dict ):
'''simple docstring'''
a : List[str] = ''.join(A ).replace(A , ' ' ).strip()
return out_string
def lowerCamelCase__ ( self : Any , A : str , A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a : Optional[int] = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , 'wb' ) as fi:
a : Tuple = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def lowerCamelCase__ ( self : Any , A : List[str] , A : str = "eng_Latn" , A : Optional[List[str]] = None , A : str = "fra_Latn" , **A : Optional[int] , ):
'''simple docstring'''
a : Union[str, Any] = src_lang
a : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(A , A , **A )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Union[str, Any] , A : Dict ):
'''simple docstring'''
a : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
a : List[Any] = []
a : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
a : Union[str, Any] = [self.cur_lang_code]
a : List[str] = [self.eos_token_id]
def lowerCamelCase__ ( self : Optional[Any] , A : str ):
'''simple docstring'''
a : Tuple = self.lang_code_to_id[lang]
if self.legacy_behaviour:
a : List[str] = []
a : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
a : Union[str, Any] = [self.cur_lang_code]
a : List[str] = [self.eos_token_id]
| 186 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 251 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[int]= logging.get_logger(__name__)
_a : Dict= {"vocab_file": "spiece.model"}
_a : int= {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
_a : int= {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
_a : Optional[Any]= "▁"
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self : Optional[Any] , _A : List[str] , _A : int=True , _A : Optional[int]=True , _A : Any=False , _A : str="[CLS]" , _A : Dict="[SEP]" , _A : Any="<unk>" , _A : List[Any]="[SEP]" , _A : Any="<pad>" , _A : List[str]="[CLS]" , _A : int="[MASK]" , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__snake_case : Dict = (
AddedToken(_A , lstrip=_A , rstrip=_A , normalized=_A)
if isinstance(_A , _A)
else mask_token
)
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__snake_case : Optional[int] = do_lower_case
__snake_case : Any = remove_space
__snake_case : Any = keep_accents
__snake_case : Dict = vocab_file
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_A)
@property
def _lowercase (self : str) -> Optional[Any]:
return len(self.sp_model)
def _lowercase (self : Dict) -> List[str]:
__snake_case : int = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self : Optional[int]) -> int:
__snake_case : List[Any] = self.__dict__.copy()
__snake_case : Any = None
return state
def __setstate__(self : Union[str, Any] , _A : Optional[Any]) -> int:
__snake_case : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__snake_case : Optional[int] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowercase (self : Tuple , _A : int) -> int:
if self.remove_space:
__snake_case : int = ' '.join(inputs.strip().split())
else:
__snake_case : str = inputs
__snake_case : Optional[int] = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
__snake_case : Tuple = unicodedata.normalize('NFKD' , _A)
__snake_case : Optional[Any] = ''.join([c for c in outputs if not unicodedata.combining(_A)])
if self.do_lower_case:
__snake_case : Union[str, Any] = outputs.lower()
return outputs
def _lowercase (self : Any , _A : str) -> List[str]:
__snake_case : Union[str, Any] = self.preprocess_text(_A)
__snake_case : Optional[Any] = self.sp_model.encode(_A , out_type=_A)
__snake_case : Tuple = []
for piece in pieces:
if len(_A) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__snake_case : Tuple = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__snake_case : Any = cur_pieces[1:]
else:
__snake_case : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_A)
else:
new_pieces.append(_A)
return new_pieces
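        # Illustration (hypothetical piece): SentencePiece may emit "▁1999," as a single
        # piece; the loop above re-encodes it so the trailing comma becomes its own
        # token, e.g. ["▁1999", ","], keeping digits and punctuation separate.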
def _lowercase (self : Optional[Any] , _A : List[str]) -> int:
return self.sp_model.PieceToId(_A)
def _lowercase (self : Optional[int] , _A : Tuple) -> Union[str, Any]:
return self.sp_model.IdToPiece(_A)
def _lowercase (self : Union[str, Any] , _A : Union[str, Any]) -> Dict:
__snake_case : Any = []
__snake_case : Dict = ''
__snake_case : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A) + token
__snake_case : List[str] = True
__snake_case : int = []
else:
current_sub_tokens.append(_A)
__snake_case : int = False
out_string += self.sp_model.decode(_A)
return out_string.strip()
def _lowercase (self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : Dict = [self.sep_token_id]
__snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase (self : Tuple , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
if token_ids_a is not None:
return [1] + ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1]
return [1] + ([0] * len(_A)) + [1]
def _lowercase (self : str , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : int = [self.sep_token_id]
__snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowercase (self : Optional[Any] , _A : str , _A : Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_A):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__snake_case : Optional[Any] = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _A)
elif not os.path.isfile(self.vocab_file):
with open(_A , 'wb') as fi:
__snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A)
return (out_vocab_file,)
| 172 | 0 |
def lowerCAmelCase__ ( a__ = 1_000_000 ) ->int:
'''simple docstring'''
_UpperCamelCase = limit + 1
_UpperCamelCase = [0] * limit
for first_term in range(1 , lowerCAmelCase__ ):
for n in range(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # z > 0 since a > d, and n > 0 since a < 4d
_UpperCamelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
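# Derivation sketch behind the loops above: with x = a + d, y = a, z = a - d,
# x**2 - y**2 - z**2 simplifies to a * (4d - a) = n. Iterating a (`first_term`)
# over the divisors of each n gives n / a = 4d - a, so a + n / a must be divisible
# by 4, and a solution is only valid when d < a < 4d.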
if __name__ == "__main__":
print(F"{solution() = }")
| 360 | import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Optional[Any]=6 , lowercase_ : int=17 , lowercase_ : List[Any]=23 , lowercase_ : List[Any]=11 , lowercase_ : Dict=True , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = act_dim
_UpperCamelCase = state_dim
_UpperCamelCase = hidden_size
_UpperCamelCase = max_length
_UpperCamelCase = is_training
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1))
_UpperCamelCase = floats_tensor((self.batch_size, self.seq_length, 1))
_UpperCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000)
_UpperCamelCase = random_attention_mask((self.batch_size, self.seq_length))
_UpperCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __UpperCAmelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Dict , ) -> int:
"""simple docstring"""
_UpperCamelCase = DecisionTransformerModel(config=lowercase_)
model.to(lowercase_)
model.eval()
_UpperCamelCase = model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
self.parent.assertEqual(result.state_preds.shape , states.shape)
self.parent.assertEqual(result.action_preds.shape , actions.shape)
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape)
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length * 3 as there are 3 modalities: states, returns and actions
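        # i.e. the transformer input interleaves (return_1, state_1, action_1, ...,
        # return_T, state_T, action_T), hence 3 * seq_length positions overall.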
def __UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = (DecisionTransformerModel,) if is_torch_available() else ()
__A = ()
__A = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__A = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = DecisionTransformerModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
@slow
def __UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = DecisionTransformerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(lowercase_)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(lowercase_)] , lowercase_)
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = 2 # number of steps of autoregressive prediction we will perform
_UpperCamelCase = 10 # defined by the RL environment, may be normalized
_UpperCamelCase = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
_UpperCamelCase = model.to(lowercase_)
_UpperCamelCase = model.config
torch.manual_seed(0)
_UpperCamelCase = torch.randn(1 , 1 , config.state_dim).to(device=lowercase_ , dtype=torch.floataa) # env.reset()
_UpperCamelCase = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=lowercase_)
_UpperCamelCase = torch.tensor(lowercase_ , device=lowercase_ , dtype=torch.floataa).reshape(1 , 1 , 1)
_UpperCamelCase = state
_UpperCamelCase = torch.zeros(1 , 0 , config.act_dim , device=lowercase_ , dtype=torch.floataa)
_UpperCamelCase = torch.zeros(1 , 0 , device=lowercase_ , dtype=torch.floataa)
_UpperCamelCase = torch.tensor(0 , device=lowercase_ , dtype=torch.long).reshape(1 , 1)
for step in range(lowercase_):
_UpperCamelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowercase_)] , dim=1)
_UpperCamelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=lowercase_)] , dim=1)
_UpperCamelCase = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
with torch.no_grad():
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = model(
states=lowercase_ , actions=lowercase_ , rewards=lowercase_ , returns_to_go=lowercase_ , timesteps=lowercase_ , attention_mask=lowercase_ , return_dict=lowercase_ , )
self.assertEqual(action_pred.shape , actions.shape)
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4))
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim).to(device=lowercase_ , dtype=torch.floataa),
1.0,
False,
{},
)
_UpperCamelCase = action_pred[0, -1]
_UpperCamelCase = torch.cat([states, state] , dim=1)
_UpperCamelCase = returns_to_go[0, -1] - reward
_UpperCamelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
_UpperCamelCase = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowercase_ , dtype=torch.long) * (step + 1)] , dim=1)
| 63 | 0 |
import cmath
import math
def lowerCamelCase__ ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ):
"""simple docstring"""
lowerCAmelCase_ = math.radians(__lowerCAmelCase )
lowerCAmelCase_ = math.radians(__lowerCAmelCase )
# Convert voltage and current to rectangular form
lowerCAmelCase_ = cmath.rect(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase_ = cmath.rect(__lowerCAmelCase , __lowerCAmelCase )
# Calculate apparent power
return voltage_rect * current_rect
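# Worked example (hypothetical inputs, in argument order: voltage, current,
# voltage_angle, current_angle): 100, 5, 30, -30 multiplies the phasors
# 100∠30° * 5∠-30° = 500∠0°, so the function returns approximately (500+0j).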
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
lowerCAmelCase_ = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(__lowerCAmelCase )
DownloadCommand.register_subcommand(__lowerCAmelCase )
EnvironmentCommand.register_subcommand(__lowerCAmelCase )
RunCommand.register_subcommand(__lowerCAmelCase )
ServeCommand.register_subcommand(__lowerCAmelCase )
UserCommands.register_subcommand(__lowerCAmelCase )
AddNewModelCommand.register_subcommand(__lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(__lowerCAmelCase )
LfsCommands.register_subcommand(__lowerCAmelCase )
PTtoTFCommand.register_subcommand(__lowerCAmelCase )
# Let's go
lowerCAmelCase_ = parser.parse_args()
if not hasattr(__lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
lowerCAmelCase_ = args.func(__lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 231 | 1 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCAmelCase : List[Any] = '''CompVis/stable-diffusion-v1-1'''
UpperCAmelCase : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
UpperCAmelCase : List[Any] = '''CompVis/stable-diffusion-v1-3'''
UpperCAmelCase : Any = '''CompVis/stable-diffusion-v1-4'''
class __lowercase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A , A = True , ) -> Tuple:
'''simple docstring'''
        super().__init__()
lowerCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase = StableDiffusionPipeline(
vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=_SCREAMING_SNAKE_CASE , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __A ( self ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , _SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith("""_""" )}
def __A ( self , A = "auto" ) -> Any:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
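        # e.g. (hypothetical config) with attention_head_dim = 8, "auto" slices the
        # attention computation into chunks of 4 heads, trading some speed for a
        # lower peak-memory footprint.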
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __A ( self , A , A = 5_12 , A = 5_12 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> str:
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __A ( self , A , A = 5_12 , A = 5_12 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> int:
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __A ( self , A , A = 5_12 , A = 5_12 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __A ( self , A , A = 5_12 , A = 5_12 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Union[str, Any]:
'''simple docstring'''
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __A ( self , A , A = 5_12 , A = 5_12 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(_SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 364 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
enable_full_determinism()
class __lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = UNetaDModel
UpperCamelCase : Union[str, Any] = "sample"
@property
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = 4
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor([10] ).to(A )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __A ( self ) -> Dict:
'''simple docstring'''
return (3, 32, 32)
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
class __lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : int = UNetaDModel
UpperCamelCase : Dict = "sample"
@property
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = 4
lowerCamelCase = 4
lowerCamelCase = (32, 32)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor([10] ).to(A )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ) -> Tuple:
'''simple docstring'''
return (4, 32, 32)
@property
def __A ( self ) -> Dict:
'''simple docstring'''
return (4, 32, 32)
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=A )
self.assertIsNotNone(A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(A )
lowerCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=A )
model.to(A )
lowerCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=A )
model_accelerate.to(A )
model_accelerate.eval()
lowerCamelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCamelCase = noise.to(A )
lowerCamelCase = torch.tensor([10] * noise.shape[0] ).to(A )
lowerCamelCase = model_accelerate(A , A )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCamelCase , lowerCamelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=A , low_cpu_mem_usage=A )
model_normal_load.to(A )
model_normal_load.eval()
lowerCamelCase = model_normal_load(A , A )["""sample"""]
assert torch_all_close(A , A , rtol=1e-3 )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(A )
lowerCamelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCamelCase = noise.to(A )
lowerCamelCase = torch.tensor([10] * noise.shape[0] ).to(A )
with torch.no_grad():
lowerCamelCase = model(A , A ).sample
lowerCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(A , A , rtol=1e-3 ) )
class __lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = UNetaDModel
UpperCamelCase : Optional[int] = "sample"
@property
def __A ( self , A=(32, 32) ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = 4
lowerCamelCase = 3
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.int32 , device=A )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __A ( self ) -> Dict:
'''simple docstring'''
return (3, 32, 32)
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=A )
self.assertIsNotNone(A )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(A )
lowerCamelCase = self.dummy_input
lowerCamelCase = floats_tensor((4, 3) + (2_56, 2_56) ).to(A )
lowerCamelCase = noise
lowerCamelCase = model(**A )
assert image is not None, "Make sure output is not None"
@slow
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(A )
lowerCamelCase = 4
lowerCamelCase = 3
lowerCamelCase = (2_56, 2_56)
lowerCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor(batch_size * [1e-4] ).to(A )
with torch.no_grad():
lowerCamelCase = model(A , A ).sample
lowerCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(A , A , rtol=1e-2 ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(A )
lowerCamelCase = 4
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(A )
lowerCamelCase = torch.tensor(batch_size * [1e-4] ).to(A )
with torch.no_grad():
lowerCamelCase = model(A , A ).sample
lowerCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCamelCase = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(A , A , rtol=1e-2 ) )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
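For reference, this is the forward pass the first tester class above exercises, written as a standalone sketch; UNet2DModel is the unmangled name of the class under test, and the config mirrors the dummy model's.

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    block_out_channels=(32, 64),
    layers_per_block=2,
    sample_size=32,
    in_channels=3,
    out_channels=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
)
sample = torch.randn(4, 3, 32, 32)  # (batch, channels, height, width)
timestep = torch.tensor([10])
with torch.no_grad():
    noise_pred = model(sample, timestep).sample
print(noise_pred.shape)  # torch.Size([4, 3, 32, 32])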
| 66 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase__ )
class a__( lowercase__ ):
lowercase__ = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowercase__ = Features({"""audio""": Audio()} )
lowercase__ = Features({"""transcription""": Value("""string""" )} )
lowercase__ = "audio"
lowercase__ = "transcription"
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[int] ):
if self.audio_column not in features:
raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , __lowercase ):
raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
a : int = copy.deepcopy(self )
a : str = self.input_schema.copy()
a : List[str] = features[self.audio_column]
a : Optional[Any] = input_schema
return task_template
@property
def lowercase_ ( self : Union[str, Any] ):
return {self.audio_column: "audio", self.transcription_column: "transcription"} | 297 |
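A hedged sketch of applying this task template. `prepare_for_task` exists in the `datasets` releases that still ship task templates (it was deprecated later), and the dataset id and column names here are illustrative.

from datasets import load_dataset

ds = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="train")  # illustrative dataset
ds = ds.rename_column("sentence", "transcription")  # match the template's output schema
ds = ds.prepare_for_task("automatic-speech-recognition")
print(ds.features)  # {'audio': Audio(...), 'transcription': Value('string')}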
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : int = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def UpperCAmelCase ( self : List[str] , __lowercase : Optional[Any]=0 ) -> Any:
__UpperCAmelCase : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowercase ) )
__UpperCAmelCase : int = np.random.RandomState(__lowercase )
__UpperCAmelCase : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : int = self.get_dummy_inputs()
__UpperCAmelCase : Optional[Any] = pipe(**__lowercase ).images
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : List[str] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Any = self.get_dummy_inputs()
__UpperCAmelCase : Tuple = pipe(**__lowercase ).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : str = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
# warmup pass to apply optimizations
__UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs() )
__UpperCAmelCase : Tuple = self.get_dummy_inputs()
__UpperCAmelCase : Any = pipe(**__lowercase ).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : Optional[int] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : Optional[Any] ) -> str:
__UpperCAmelCase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[str] = self.get_dummy_inputs()
__UpperCAmelCase : int = pipe(**__lowercase ).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : Tuple = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : int ) -> Any:
__UpperCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__UpperCAmelCase : int = pipe(**__lowercase ).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : List[str] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCAmelCase ( self : Tuple ) -> str:
__UpperCAmelCase : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__UpperCAmelCase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Optional[Any] = self.get_dummy_inputs()
__UpperCAmelCase : int = pipe(**__lowercase ).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCAmelCase : Union[str, Any] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase ( self : Dict ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase ( self : Tuple ) -> Tuple:
__UpperCAmelCase : Optional[int] = ort.SessionOptions()
__UpperCAmelCase : List[Any] = False
return options
def UpperCAmelCase ( self : List[str] ) -> Tuple:
__UpperCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase : Dict = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__UpperCAmelCase : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Dict = """A fantasy landscape, trending on artstation"""
__UpperCAmelCase : str = np.random.RandomState(0 )
__UpperCAmelCase : Optional[Any] = pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowercase , output_type="""np""" , )
__UpperCAmelCase : str = output.images
__UpperCAmelCase : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase : Union[str, Any] = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCAmelCase ( self : Optional[Any] ) -> str:
__UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__UpperCAmelCase : int = init_image.resize((768, 512) )
__UpperCAmelCase : Tuple = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__UpperCAmelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Dict = """A fantasy landscape, trending on artstation"""
__UpperCAmelCase : int = np.random.RandomState(0 )
__UpperCAmelCase : Optional[int] = pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowercase , output_type="""np""" , )
__UpperCAmelCase : Union[str, Any] = output.images
__UpperCAmelCase : Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCAmelCase : str = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
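The nightly test above can be run outside the test harness as a short script. This sketch uses the unmangled class name, OnnxStableDiffusionImg2ImgPipeline, and the same checkpoint and input image as the test; it requires `onnxruntime` and downloads the ONNX revision of the weights.

import numpy as np
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=20,
    generator=np.random.RandomState(0),  # ONNX pipelines take a NumPy RNG
).images[0]
image.save("fantasy_landscape.png")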
| 114 | 0 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
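A non-interactive worked example for the keyword cipher above; the values match the doctests in the original algorithm collection.

cipher_map = create_cipher_map("Goodbye!!")  # punctuation and duplicate letters are dropped
print(encipher("Hello World!!", cipher_map))  # CYJJM VMQJB!!
print(decipher("Cyjjm vmqjb!!", cipher_map))  # HELLO WORLD!!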
| 167 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.dummy_uncond_unet
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
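The same setup as the slow test above, as a standalone script; UNet2DModel is the unmangled name of the imported model class, and a CUDA device is assumed (CPU works, just slowly).

import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
pipe.to("cuda")
generator = torch.manual_seed(0)
image = pipe(num_inference_steps=20, generator=generator, output_type="pil").images[0]
image.save("karras_ve_celebahq.png")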
| 167 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
def get_masked_lm_array(__lowerCAmelCase : str ):
a__ = F'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a__ = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
if "kernel" in name:
a__ = array.transpose()
return torch.from_numpy(_lowerCamelCase )
def get_encoder_array(__lowerCAmelCase : str ):
a__ = F'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a__ = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
if "kernel" in name:
a__ = array.transpose()
return torch.from_numpy(_lowerCamelCase )
def get_encoder_layer_array(__lowerCAmelCase : int , __lowerCAmelCase : str ):
a__ = F'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a__ = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
if "kernel" in name:
a__ = array.transpose()
return torch.from_numpy(_lowerCamelCase )
def get_encoder_attention_layer_array(__lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ):
a__ = F'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'
a__ = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
a__ = array.reshape(_lowerCamelCase )
if "kernel" in name:
a__ = array.transpose()
return torch.from_numpy(_lowerCamelCase )
print(F'Loading model based on config from {config_path}...' )
a__ = BertConfig.from_json_file(_lowerCamelCase )
a__ = BertForMaskedLM(_lowerCamelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
a__ = model.bert.encoder.layer[layer_index]
# Self-attention
a__ = layer.attention.self
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_query_dense/kernel' , self_attn.query.weight.data.shape )
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_query_dense/bias' , self_attn.query.bias.data.shape )
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_key_dense/kernel' , self_attn.key.weight.data.shape )
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_key_dense/bias' , self_attn.key.bias.data.shape )
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_value_dense/kernel' , self_attn.value.weight.data.shape )
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
a__ = layer.attention.output
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_output_dense/kernel' , self_output.dense.weight.data.shape )
a__ = get_encoder_attention_layer_array(
_lowerCamelCase , '_output_dense/bias' , self_output.dense.bias.data.shape )
a__ = get_encoder_layer_array(_lowerCamelCase , '_attention_layer_norm/gamma' )
a__ = get_encoder_layer_array(_lowerCamelCase , '_attention_layer_norm/beta' )
# Intermediate
a__ = layer.intermediate
a__ = get_encoder_layer_array(_lowerCamelCase , '_intermediate_dense/kernel' )
a__ = get_encoder_layer_array(_lowerCamelCase , '_intermediate_dense/bias' )
# Output
a__ = layer.output
a__ = get_encoder_layer_array(_lowerCamelCase , '_output_dense/kernel' )
a__ = get_encoder_layer_array(_lowerCamelCase , '_output_dense/bias' )
a__ = get_encoder_layer_array(_lowerCamelCase , '_output_layer_norm/gamma' )
a__ = get_encoder_layer_array(_lowerCamelCase , '_output_layer_norm/beta' )
# Embeddings
a__ = get_encoder_array('_position_embedding_layer/embeddings' )
a__ = get_encoder_array('_type_embedding_layer/embeddings' )
a__ = get_encoder_array('_embedding_norm_layer/gamma' )
a__ = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
a__ = model.cls.predictions.transform
a__ = get_masked_lm_array('dense/kernel' )
a__ = get_masked_lm_array('dense/bias' )
a__ = get_masked_lm_array('layer_norm/gamma' )
a__ = get_masked_lm_array('layer_norm/beta' )
a__ = get_masked_lm_array('embedding_table' )
# Pooling
a__ = BertPooler(config=_lowerCamelCase )
a__ = get_encoder_array('_pooler_layer/kernel' )
a__ = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(_lowerCamelCase )
# Integration test - should load without any errors ;)
a__ = BertForMaskedLM.from_pretrained(_lowerCamelCase )
print(new_model.eval() )
print('Model conversion was done sucessfully!' )
if __name__ == "__main__":
snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
snake_case : int = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
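For reference, the conversion entry point can also be called programmatically; the three paths below are placeholders for a real TF2 Token Dropping checkpoint, its BERT config JSON, and a writable output directory (left commented so the module stays import-safe).

# convert_checkpoint_to_pytorch(
#     "/path/to/token_dropping_checkpoint",
#     "/path/to/bert_config.json",
#     "/path/to/pytorch_dump_dir",
# )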
| 240 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 | 0 |
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
| 357 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 173 | 0 |
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the highway following the parameters given."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells between a car and the next car ahead of it."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Update the speed of every car on the highway for one time step."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    """Simulate the evolution of the highway, appending one state per step."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
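A short demo of the Nagel-Schreckenberg model above: a 30-cell ring with one car every 5 cells at initial speed 2, evolved for 10 steps with a 10% random-slowdown probability.

highway = construct_highway(30, 5, 2)
for state in simulate(highway, number_of_update=10, probability=0.1, max_speed=5):
    # "." is an empty cell; digits are car speeds
    print("".join("." if cell == -1 else str(cell) for cell in state))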
| 14 |
"""simple docstring"""
# Imports
import numpy as np
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ):
self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase )
def A_ ( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ):
if red is not None:
_lowerCamelCase : Optional[int] = red
if green is not None:
_lowerCamelCase : Optional[Any] = green
if blue is not None:
_lowerCamelCase : Tuple = blue
if red_edge is not None:
_lowerCamelCase : Optional[Any] = red_edge
if nir is not None:
_lowerCamelCase : Union[str, Any] = nir
return True
def A_ ( self , lowercase="" , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None ):
self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase )
_lowerCamelCase : str = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def A_ ( self ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def A_ ( self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def A_ ( self ):
return self.nir * (self.red / (self.green**2))
def A_ ( self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def A_ ( self ):
return (self.nir - self.red) / (self.nir + self.red)
def A_ ( self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def A_ ( self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def A_ ( self ):
return (self.nir - self.green) / (self.nir + self.green)
def A_ ( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def A_ ( self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def A_ ( self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def A_ ( self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def A_ ( self , lowercase=0.08 , lowercase=1.22 , lowercase=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def A_ ( self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def A_ ( self ):
return (self.nir / self.green) - 1
def A_ ( self ):
return (self.nir / self.redEdge) - 1
def A_ ( self ):
return (self.red - self.blue) / self.red
def A_ ( self ):
_lowerCamelCase : Any = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def A_ ( self ):
return self.nir - self.green
def A_ ( self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def A_ ( self ):
_lowerCamelCase : Any = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def A_ ( self , lowercase=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def A_ ( self , lowercase=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def A_ ( self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def A_ ( self , lowercase=None , lowercase=None ):
return (self.nir - b) / (a * self.red)
def A_ ( self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def A_ ( self ):
return (self.red + self.green + self.blue) / 30.5
def A_ ( self ):
return self.nir / self.red
def A_ ( self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def A_ ( self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def A_ ( self ):
return self.green / (self.nir + self.red + self.green)
def A_ ( self ):
return self.nir / (self.nir + self.red + self.green)
def A_ ( self ):
return self.red / (self.nir + self.red + self.green)
def A_ ( self ):
return (self.green - self.red) / (self.green + self.red)
def A_ ( self ):
return (self.red - self.green) / (self.red + self.green)
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
_lowerCamelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def A_ ( self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def A_ ( self ):
return self.nir / self.red
def A_ ( self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def A_ ( self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 96 | 0 |
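A standalone NumPy check of the NDVI formula implemented by the class above, (nir - red) / (nir + red), on a small synthetic band pair:

import numpy as np

red = np.array([[50.0, 60.0]])
nir = np.array([[200.0, 180.0]])
print((nir - red) / (nir + red))  # [[0.6 0.5]]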
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 365 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
'''simple docstring'''
def __init__( self, A, A=2, A=3, A=4, A=2, A=7, A=True, A=True, A=True, A=True, A=99, A=36, A=3, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=6, A=6, A=3, A=4, A=None, A=1_000, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = text_seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : str = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : Union[str, Any] = text_seq_length
SCREAMING_SNAKE_CASE : List[str] = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : int = self.text_seq_length + self.image_seq_length
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Tuple = t
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
SCREAMING_SNAKE_CASE : int = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[int] = model(A, pixel_values=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A )
SCREAMING_SNAKE_CASE : List[str] = model(A, bbox=A, pixel_values=A, token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[int] = model(A, bbox=A, pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[Any] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : str = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : Any = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = False
A : List[str] = False
A : Union[str, Any] = False
A : Optional[Any] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A : List[Any] = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self, A, A, A, A, A ):
'''simple docstring'''
return True
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self, config_class=A, hidden_size=37 )
def UpperCamelCase_ ( self, A, A, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(A )
if model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Optional[int] = {
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(A, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in [
*get_values(A ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in [
*get_values(A ),
]:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=A, )
return inputs_dict
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : List[str] = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).pixel_values.to(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
input_ids=input_ids.to(A ), bbox=bbox.to(A ), pixel_values=pixel_values.to(A ), )
# verify the logits
SCREAMING_SNAKE_CASE : str = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], A, atol=1E-4 ) )
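The integration test above as a standalone inference sketch; LayoutLMv3Model and LayoutLMv3ImageProcessor are the unmangled names of the classes used (older transformers releases expose LayoutLMv3FeatureExtractor instead), and the image path is a placeholder for any RGB document image.

import torch
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Model

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

pixel_values = processor(images=Image.open("document.png"), return_tensors="pt").pixel_values
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 199, 768])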
| 246 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(_UpperCamelCase , _UpperCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(_UpperCamelCase , _UpperCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(_UpperCamelCase , _UpperCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(_UpperCamelCase , _UpperCamelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 250 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        # Convert the PIL image into the pixel-value tensors the model expects.
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        # Autoregressively generate caption token ids.
        return self.model.generate(**inputs)

    def decode(self, outputs):
        # Turn the generated ids back into a clean caption string.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
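# A minimal usage sketch (illustrative only: "photo.jpg" is a placeholder path
# and the BLIP checkpoint is downloaded on first use). PipelineTool subclasses
# are callable, so captioning is a single call:
#
#     from PIL import Image
#
#     captioner = ImageCaptioningTool()
#     print(captioner(Image.open("photo.jpg")))  # e.g. "a dog sitting in the grass"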
| 242 | 0 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Project Euler 71: numerator of the largest fraction strictly to the
    left of numerator/denominator among fractions with denominators <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # Largest numerator with current_numerator/current_denominator <= numerator/denominator.
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # The target fraction is hit exactly here; step down to stay strictly below it.
            current_numerator -= 1
        # Cross-multiplied comparison keeps everything in exact integer arithmetic.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
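# Quick check (added for illustration) mirroring the Project Euler 71 example:
# with denominators capped at 8, the fraction immediately to the left of 3/7
# is 2/5, so the returned numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2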
| 365 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a WMT translation dataset and write each split as line-aligned
    {split}.source / {split}.target text files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # save to val.source, val.target like the summarization datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # the reader is the bottleneck, so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
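# Example invocation (the script filename is illustrative; fire maps keyword
# arguments to command-line flags):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en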
| 102 | 0 |