"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase :
def __init__( self : Tuple , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : Dict=6_4 , __UpperCAmelCase : Any=None ) -> List[str]:
SCREAMING_SNAKE_CASE__ = np.random.default_rng(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = length
SCREAMING_SNAKE_CASE__ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[str] ) -> Optional[Any]:
return self.length
def __getitem__( self : List[Any] , __UpperCAmelCase : Optional[Any] ) -> Optional[int]:
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase (torch.nn.Module ):
def __init__( self : Tuple , __UpperCAmelCase : Any=0 , __UpperCAmelCase : Dict=0 , __UpperCAmelCase : Any=False ) -> Optional[int]:
super().__init__()
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE__ = True
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[Any]=None ) -> Any:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
SCREAMING_SNAKE_CASE__ = False
return x * self.a[0] + self.b[0]
class lowerCamelCase (torch.nn.Module ):
def __init__( self : Optional[Any] , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : Any=0 , __UpperCAmelCase : Any=False ) -> Union[str, Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() )
SCREAMING_SNAKE_CASE__ = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() )
SCREAMING_SNAKE_CASE__ = True
def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Tuple=None ) -> List[Any]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
SCREAMING_SNAKE_CASE__ = False
return x * self.a + self.b
def A ( snake_case__ , snake_case__ = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE__ = load_dataset("""csv""" , data_files=__a )
SCREAMING_SNAKE_CASE__ = datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE__ = {v: i for i, v in enumerate(__a )}
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=__a , max_length=__a , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE__ = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE__ = datasets.map(
__a , batched=__a , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(__a , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(tokenized_datasets["""train"""] , shuffle=__a , collate_fn=__a , batch_size=2 )
SCREAMING_SNAKE_CASE__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=__a , collate_fn=__a , batch_size=1 )
return train_dataloader, eval_dataloader
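

# A minimal usage sketch (my addition, not part of the original module): fit the toy
# model on the synthetic dataset with plain SGD; the learned (a, b) should approach
# the generating values (2, 3).
def _example_training_loop():
    dataset = RegressionDataset(a=2, b=3, length=64, seed=42)
    dataloader = DataLoader(dataset, batch_size=8, shuffle=True)
    model = RegressionModel(a=0, b=0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(20):  # a few epochs are enough for this toy problem
        for batch in dataloader:
            loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
    return model.a.item(), model.b.item()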
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger(__name__)
def a_ ( __a ):
A__ = DPTConfig()
if "large" in checkpoint_url:
A__ = 1024
A__ = 4096
A__ = 24
A__ = 16
A__ = [5, 11, 17, 23]
A__ = [256, 512, 1024, 1024]
A__ = (1, 384, 384)
if "ade" in checkpoint_url:
A__ = True
A__ = 150
A__ = '''huggingface/label-files'''
A__ = '''ade20k-id2label.json'''
A__ = json.load(open(cached_download(hf_hub_url(__a , __a , repo_type='''dataset''' ) ) , '''r''' ) )
A__ = {int(__a ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = [1, 150, 480, 480]
return config, expected_shape
def a_ ( __a ):
A__ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(__a , __a )
def a_ ( __a ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
A__ = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
A__ = name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
A__ = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
A__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
A__ = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
A__ = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
A__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
A__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A__ = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
A__ = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
A__ = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
A__ = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
A__ = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
A__ = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
A__ = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
A__ = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
A__ = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
A__ = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
A__ = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
A__ = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
A__ = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
A__ = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
A__ = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
A__ = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
A__ = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
A__ = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
A__ = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
A__ = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
A__ = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
A__ = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
A__ = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
A__ = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def a_ ( __a , __a ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def a_ ( ):
A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ = Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def a_ ( __a , __a , __a , __a ):
A__ , A__ = get_dpt_config(__a )
# load original state_dict from URL
A__ = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(__a )
# rename keys
for key in state_dict.copy().keys():
A__ = state_dict.pop(__a )
A__ = val
# read in qkv matrices
read_in_q_k_v(__a , __a )
# load HuggingFace model
A__ = DPTForSemanticSegmentation(__a ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__a )
model.load_state_dict(__a )
model.eval()
# Check outputs on an image
A__ = 480 if '''ade''' in checkpoint_url else 384
A__ = DPTImageProcessor(size=__a )
A__ = prepare_img()
A__ = image_processor(__a , return_tensors='''pt''' )
# forward pass
A__ = model(**__a ).logits if '''ade''' in checkpoint_url else model(**__a ).predicted_depth
# Assert logits
A__ = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
if "ade" in checkpoint_url:
A__ = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
assert outputs.shape == torch.Size(__a )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __a , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __a )
)
Path(__a ).mkdir(exist_ok=__a )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__a )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__a , )
image_processor.push_to_hub(
repo_path_or_name=Path(__a , __a ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__a , )
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
__snake_case : Dict = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
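

# Example invocation (a sketch; the script filename is hypothetical, and the default
# checkpoint URL above points at the DPT-Large MiDaS weights):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large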
"""Rabin-Karp substring search using a rolling hash."""

# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using hash comparisons to skip most direct checks."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
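

# A small worked example of the rolling hash above (illustrative addition). With
# alphabet_size = 256, the hash of "ab" is ord("a") * 256 + ord("b") = 24930.
# Sliding the window one step to "bc" subtracts the dropped character's
# contribution, shifts, and appends the new character:
#   (24930 - ord("a") * 256) * 256 + ord("c") = 98 * 256 + 99 = 25187
# which equals hash("bc") computed from scratch (modulus is large enough here
# that the % operations change nothing).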
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (inverted pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/  \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
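

# Worked trace (illustrative addition): for number = 13 (0b1101) the loop runs
# exactly three times, once per set bit:
#   13 & 12 = 12 (0b1100) -> 12 & 11 = 8 (0b1000) -> 8 & 7 = 0, giving count = 3.
# `number &= number - 1` always clears the lowest set bit, which is why the loop
# count equals the population count rather than the bit width.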
"""Check whether a 2D system of coplanar forces is in static equilibrium."""
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form (magnitude, angle) into rectangular components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Return True if the net moment of `forces` applied at `location` is (near) zero."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
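

# A minimal numeric sketch (my addition): two equal upward forces applied at
# x = -1 and x = +1 create moments about the origin that cancel, so the system
# is reported as balanced:
#   in_static_equilibrium(array([[0, 10.0], [0, 10.0]]), array([[-1.0, 0.0], [1.0, 0.0]]))  # True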
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def snake_case ( _a: Dict , _a: int )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = []
for part_id in partition_order:
lowerCamelCase__ = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(_a ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(100 ).repartition(1 )
lowerCamelCase__ = Spark(_a )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(10 ).repartition(2 )
lowerCamelCase__ = [1, 0]
lowerCamelCase__ = _generate_iterable_examples(_a , _a ) # Reverse the partitions.
lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , _a )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(10 ).repartition(1 )
lowerCamelCase__ = SparkExamplesIterable(_a )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_a ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> Tuple:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
lowerCamelCase__ = lambda _a : x.reverse()
lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [2, 1, 0] )
lowerCamelCase__ = SparkExamplesIterable(_a ).shuffle_data_sources(_a )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_a ):
lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowerCamelCase__ = SparkExamplesIterable(_a ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [0, 2] )
for i, (row_id, row_dict) in enumerate(_a ):
lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowerCamelCase__ = SparkExamplesIterable(_a ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_a , [1, 3] )
for i, (row_id, row_dict) in enumerate(_a ):
lowerCamelCase__ , lowerCamelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( )-> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCamelCase__ = spark.range(100 ).repartition(1 )
lowerCamelCase__ = Spark(_a )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
"""simple docstring"""
def snake_case ( _a: int = 4000000 )-> int:
'''simple docstring'''
lowerCamelCase__ = [0, 1]
lowerCamelCase__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase__ = 0
for j in range(len(_a ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
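

# An equivalent sketch (my addition, not from the original solution): every third
# Fibonacci number is even, and the even ones satisfy E(k) = 4 * E(k - 1) + E(k - 2)
# with E(1) = 2, E(2) = 8, so the sum can be accumulated without building a list.
def _solution_even_recurrence(n: int = 4000000) -> int:
    prev, curr = 2, 8
    total = 2 if n >= 2 else 0
    while curr <= n:
        total += curr
        prev, curr = curr, 4 * curr + prev
    return total  # agrees with solution(): 4613732 for the default limit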
if __name__ == "__main__":
print(f"""{solution() = }""")
"""Hub utilities for Diffusers: user agent, model cards, cache migration and model file download."""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4

from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    is_jinja_available,
)
from packaging import version
from requests import HTTPError

from .. import __version__
from .constants import (
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
)
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _flax_version,
    _jax_version,
    _onnxruntime_version,
    _torch_version,
    is_flax_available,
    is_onnx_available,
    is_torch_available,
)
from .logging import get_logger


logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None


# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of"
                    " diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\n"
                "Please file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this "
                "whole message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )


def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name


def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`"
                    f" is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model"
                    f" variants via `revision='{revision}'` will be removed in diffusers v1. Please use"
                    f" `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via"
                    f" `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One"
                    f" should use `variant='{revision}'` instead. However, it appears that"
                    f" {pretrained_model_name_or_path} currently does not have a"
                    f" {_add_variant(weights_name, revision)} file in the 'main' branch of"
                    f" {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if"
                    f" you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title"
                    f" '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the"
                    " correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
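

# For illustration (my addition): `_add_variant` splices the variant in front of the
# file extension, leaving the name untouched when no variant is given:
#   _add_variant("diffusion_pytorch_model.bin", "fp16")  # "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")          # "diffusion_pytorch_model.bin"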
"""Image processor class for LayoutLMv2: optional resizing plus Tesseract OCR."""
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True,
                 ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BILINEAR,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
                   resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None,
                   tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
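

# Hypothetical usage sketch (my addition; assumes the Tesseract binary is installed):
#
#   from PIL import Image
#   processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   encoding["pixel_values"].shape   # (1, 3, 224, 224), channels flipped to BGR
#   encoding["words"], encoding["boxes"]  # OCR'd words and 0-1000 normalized boxes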
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
lowercase : Tuple = "gptj"
lowercase : List[Any] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __UpperCAmelCase=5_04_00 , __UpperCAmelCase=20_48 , __UpperCAmelCase=40_96 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=5_02_56 , __UpperCAmelCase=5_02_56 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> str:
a : Optional[Any] = vocab_size
a : str = n_positions
a : Optional[Any] = n_embd
a : Tuple = n_layer
a : List[str] = n_head
a : Optional[Any] = n_inner
a : int = rotary_dim
a : str = activation_function
a : Tuple = resid_pdrop
a : List[str] = embd_pdrop
a : Dict = attn_pdrop
a : Dict = layer_norm_epsilon
a : List[Any] = initializer_range
a : Dict = use_cache
a : Optional[int] = bos_token_id
a : str = eos_token_id
super().__init__(
bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase )
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> List[str]:
super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase )
if not getattr(self._config , 'pad_token_id' , __UpperCAmelCase ):
# TODO: how to do that better?
a : Any = 0
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
a : int = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction='inputs' )
a : List[str] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
a : Tuple = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowercase_ ( self ) -> int:
return self._config.n_layer
@property
def lowercase_ ( self ) -> int:
return self._config.n_head
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
a : Optional[int] = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
a : Dict = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
a , a : Optional[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
a : List[str] = seqlen + 2
a : List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
a : Optional[int] = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
a : Any = common_inputs['attention_mask']
if self.use_past:
a : Any = ordered_inputs['attention_mask'].dtype
a : Union[str, Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def lowercase_ ( self ) -> int:
return 13
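

# Hypothetical usage sketch (my addition): generating dummy inputs for a past-enabled
# ONNX export. Assumes the GPT-J tokenizer can be downloaded.
#
#   from transformers import AutoTokenizer
#   onnx_config = GPTJOnnxConfig(GPTJConfig(), use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
#   sorted(dummy.keys())  # ['attention_mask', 'input_ids', 'past_key_values']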
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
lowercase : str = ["image_processor", "tokenizer"]
lowercase : Union[str, Any] = "FlavaImageProcessor"
lowercase : Dict = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Tuple:
a : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCAmelCase , )
a : Any = kwargs.pop('feature_extractor' )
a : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
a : Optional[Any] = self.image_processor
def __call__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[str]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
a : Tuple = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
if images is not None:
a : Tuple = self.image_processor(
__UpperCAmelCase , return_image_mask=__UpperCAmelCase , return_codebook_pixels=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
if text is not None and images is not None:
encoding.update(__UpperCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def lowercase_ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowercase_ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowercase_ ( self ) -> str:
a : str = self.tokenizer.model_input_names
a : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase_ ( self ) -> List[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , )
return self.image_processor_class
@property
def lowercase_ ( self ) -> Any:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , )
return self.image_processor
"""Download the og:image of a web page to disk."""
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__magic_name__ = HfArgumentParser(InitializationArguments)
__magic_name__ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__magic_name__ = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
__magic_name__ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__magic_name__ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
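

# Example invocation (a sketch; the script filename is hypothetical, the argument
# names come from the `InitializationArguments` fields used above):
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model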
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__lowercase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=768 ) -> List[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase__ )
lowerCamelCase_ = proj_size
lowerCamelCase_ = CLIPVisionModel(UpperCamelCase__ )
lowerCamelCase_ = PaintByExampleMapper(UpperCamelCase__ )
lowerCamelCase_ = nn.LayerNorm(config.hidden_size )
lowerCamelCase_ = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
lowerCamelCase_ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=False ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model(pixel_values=UpperCamelCase__ )
lowerCamelCase_ = clip_output.pooler_output
lowerCamelCase_ = self.mapper(latent_states[:, None] )
lowerCamelCase_ = self.final_layer_norm(UpperCamelCase__ )
lowerCamelCase_ = self.proj_out(UpperCamelCase__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = (config.num_hidden_layers + 1) // 5
lowerCamelCase_ = config.hidden_size
lowerCamelCase_ = 1
lowerCamelCase_ = nn.ModuleList(
[
BasicTransformerBlock(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , activation_fn='''gelu''' , attention_bias=UpperCamelCase__ )
for _ in range(UpperCamelCase__ )
] )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
for block in self.blocks:
lowerCamelCase_ = block(UpperCamelCase__ )
return hidden_states | 66 |
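

# Hypothetical shape sketch (my addition): with a CLIP ViT-L/14 vision config
# (hidden_size=1024, num_hidden_layers=24) the mapper stacks (24 + 1) // 5 = 5
# transformer blocks, and the encoder maps a batch of images to one conditioning
# token per image:
#
#   encoder = PaintByExampleImageEncoder(clip_vision_config, proj_size=768)
#   latents = encoder(pixel_values)                                      # (batch, 1, 768)
#   latents, uncond = encoder(pixel_values, return_uncond_vector=True)   # uncond: (1, 1, 768)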
"""simple docstring"""
import argparse
import os
import re
__lowercase : Optional[int] = """src/diffusers"""
# Pattern that looks at the indentation in a line.
__lowercase : Dict = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
__lowercase : int = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowercase : Optional[Any] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
__lowercase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowercase : Any = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file: str, check_only: bool = True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 66 | 1 |
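The ordering contract of sort_objects above (constants first, then classes, then functions, each group alphabetized with underscores ignored) can be checked in isolation; a quick demonstration, assuming the functions above are importable:

objs = ["zeta_fn", "BetaClass", "ALPHA_CONST", "AlphaClass", "_private_fn", "OMEGA"]
print(sort_objects(objs))
# ['ALPHA_CONST', 'OMEGA', 'AlphaClass', 'BetaClass', '_private_fn', 'zeta_fn']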
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would fail to reload transformers.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works through inheritance, regardless of the subclass name
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 300 |
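ContextManagers enters a list of context managers in order and exits them in reverse; a minimal stand-in built on contextlib.ExitStack (a sketch of the idea, not the transformers implementation) reproduces the nesting the tests expect:

from contextlib import ExitStack

class SimpleContextManagers:
    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        # Enter in order; ExitStack unwinds them in reverse on exit.
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *exc_info):
        self.stack.__exit__(*exc_info)

with SimpleContextManagers([context_fr(), context_en()]):
    print("Transformers are awesome!")
# Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!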
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300 | 1 |
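The deferred-import behavior behind _LazyModule can be approximated at module level with PEP 562's module-level __getattr__; a minimal sketch (the attribute map below is an assumed illustration, not the transformers class):

import importlib

_LAZY_ATTRS = {"LongformerModel": ".modeling_longformer"}

def __getattr__(name):
    # Import the submodule only when the attribute is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")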
"""simple docstring"""
import numpy as np
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase_( lowercase_ : np.array ) -> np.array:
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 |
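A quick numeric check of the two activations; the 1.702 factor makes x * sigmoid(1.702 * x) a close approximation of GELU:

import numpy as np

assert sigmoid(np.array([0.0]))[0] == 0.5  # sigmoid is centered at 0.5
x = np.array([-5.0, 0.0, 5.0])
print(sigmoid_linear_unit(x))  # approximately [-0.001, 0.0, 4.999]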
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 623 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Optional[Any] = {'vocab_file': 'spiece.model'}
A : Union[str, Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
A : Tuple = {
'albert-base-v1': 5_1_2,
'albert-large-v1': 5_1_2,
'albert-xlarge-v1': 5_1_2,
'albert-xxlarge-v1': 5_1_2,
'albert-base-v2': 5_1_2,
'albert-large-v2': 5_1_2,
'albert-xlarge-v2': 5_1_2,
'albert-xxlarge-v2': 5_1_2,
}
A : Dict = '▁'
class A ( UpperCamelCase_ ):
'''simple docstring'''
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]="[CLS]" , _UpperCAmelCase : Any="[SEP]" , _UpperCAmelCase : List[str]="<unk>" , _UpperCAmelCase : Dict="[SEP]" , _UpperCAmelCase : Optional[Any]="<pad>" , _UpperCAmelCase : int="[CLS]" , _UpperCAmelCase : Optional[Any]="[MASK]" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Dict , ) -> Any:
"""simple docstring"""
lowercase__ = (
AddedToken(a__ , lstrip=a__ , rstrip=a__ , normalized=a__ )
if isinstance(a__ , a__ )
else mask_token
)
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@property
def lowerCamelCase__ (self : List[Any] ) -> List[str]:
"""simple docstring"""
return len(self.sp_model )
def lowerCamelCase__ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__(self : str , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : List[Any] ) -> str:
"""simple docstring"""
if self.remove_space:
lowercase__ = " ".join(inputs.strip().split() )
else:
lowercase__ = inputs
lowercase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowercase__ = unicodedata.normalize("""NFKD""" , a__ )
lowercase__ = "".join([c for c in outputs if not unicodedata.combining(a__ )] )
if self.do_lower_case:
lowercase__ = outputs.lower()
return outputs
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.preprocess_text(a__ )
lowercase__ = self.sp_model.encode(a__ , out_type=a__ )
lowercase__ = []
for piece in pieces:
if len(a__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowercase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(a__ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase__ = cur_pieces[1:]
else:
lowercase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a__ )
else:
new_pieces.append(a__ )
return new_pieces
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
return self.sp_model.PieceToId(a__ )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(a__ )
def lowerCamelCase__ (self : int , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = ""
lowercase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
lowercase__ = True
lowercase__ = []
else:
current_sub_tokens.append(a__ )
lowercase__ = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> str:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> Any:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is not None:
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1]
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> Tuple:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Any:
"""simple docstring"""
if not os.path.isdir(a__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , """wb""" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 15 |
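The special-token layout produced by build_inputs_with_special_tokens and create_token_type_ids_from_sequences can be illustrated without SentencePiece; cls=2 and sep=3 below are placeholder ids, not the real vocabulary:

cls, sep = [2], [3]
ids_a, ids_b = [10, 11], [20, 21, 22]
input_ids = cls + ids_a + sep + ids_b + sep
token_type_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]
print(input_ids)       # [2, 10, 11, 3, 20, 21, 22, 3]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]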
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution()}')
| 378 | 0 |
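Two worked values from the Project Euler 74 statement confirm the helper: 145 maps to itself (a chain of length 1), and 69 starts a chain of five distinct terms:

assert digit_factorial_sum(145) == 145    # 1! + 4! + 5! = 145
assert digit_factorial_sum(69) == 363600  # 6! + 9! = 720 + 362880
# 69 -> 363600 -> 1454 -> 169 -> 363601 -> (1454 repeats): 5 distinct terms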
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase_ )
__lowercase : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase : Dict = jax.random.PRNGKey(0 )
__lowercase : str = 4
__lowercase : Tuple = jax.device_count()
__lowercase : Tuple = num_samples * [prompt]
__lowercase : Union[str, Any] = pipeline.prepare_inputs(UpperCamelCase_ )
# shard inputs and rng
__lowercase : Union[str, Any] = replicate(UpperCamelCase_ )
__lowercase : Optional[int] = jax.random.split(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : Dict = shard(UpperCamelCase_ )
__lowercase : Any = pipeline(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1E-3
assert np.abs(np.abs(UpperCamelCase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
__lowercase : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCamelCase_ ) == num_samples
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : str = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=UpperCamelCase_ )
__lowercase : Optional[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase : Optional[Any] = jax.random.PRNGKey(0 )
__lowercase : str = 50
__lowercase : List[str] = jax.device_count()
__lowercase : Any = num_samples * [prompt]
__lowercase : List[Any] = pipeline.prepare_inputs(UpperCamelCase_ )
# shard inputs and rng
__lowercase : List[Any] = replicate(UpperCamelCase_ )
__lowercase : str = jax.random.split(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : Optional[Any] = shard(UpperCamelCase_ )
__lowercase : List[Any] = pipeline(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def _lowerCamelCase ( self ) -> int:
__lowercase : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase_ )
__lowercase : Optional[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase : Any = jax.random.PRNGKey(0 )
__lowercase : List[Any] = 50
__lowercase : List[Any] = jax.device_count()
__lowercase : Union[str, Any] = num_samples * [prompt]
__lowercase : Optional[int] = pipeline.prepare_inputs(UpperCamelCase_ )
# shard inputs and rng
__lowercase : Dict = replicate(UpperCamelCase_ )
__lowercase : Optional[Any] = jax.random.split(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : str = shard(UpperCamelCase_ )
__lowercase : Optional[int] = pipeline(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
__lowercase : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase : int = jax.random.PRNGKey(0 )
__lowercase : Union[str, Any] = 50
__lowercase : Optional[Any] = jax.device_count()
__lowercase : Optional[int] = num_samples * [prompt]
__lowercase : int = pipeline.prepare_inputs(UpperCamelCase_ )
# shard inputs and rng
__lowercase : int = replicate(UpperCamelCase_ )
__lowercase : Optional[Any] = jax.random.split(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : Tuple = shard(UpperCamelCase_ )
__lowercase : Union[str, Any] = pipeline(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : int = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , )
__lowercase : str = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
__lowercase : List[Any] = scheduler.create_state()
__lowercase : Optional[Any] = scheduler_state
__lowercase : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase : int = jax.random.PRNGKey(0 )
__lowercase : Union[str, Any] = 50
__lowercase : Optional[int] = jax.device_count()
__lowercase : Tuple = num_samples * [prompt]
__lowercase : Dict = pipeline.prepare_inputs(UpperCamelCase_ )
# shard inputs and rng
__lowercase : Optional[Any] = replicate(UpperCamelCase_ )
__lowercase : Optional[Any] = jax.random.split(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : Dict = shard(UpperCamelCase_ )
__lowercase : Optional[int] = pipeline(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase : List[str] = jax.device_count()
__lowercase : Any = num_samples * [prompt]
__lowercase : int = jax.random.split(jax.random.PRNGKey(0 ) , UpperCamelCase_ )
__lowercase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase_ , )
__lowercase : Union[str, Any] = replicate(UpperCamelCase_ )
__lowercase : List[str] = pipeline.prepare_inputs(UpperCamelCase_ )
__lowercase : List[str] = shard(UpperCamelCase_ )
__lowercase : Union[str, Any] = pipeline(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
__lowercase : Union[str, Any] = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
__lowercase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase_ , use_memory_efficient_attention=UpperCamelCase_ , )
__lowercase : List[Any] = replicate(UpperCamelCase_ )
__lowercase : int = pipeline.prepare_inputs(UpperCamelCase_ )
__lowercase : List[str] = shard(UpperCamelCase_ )
__lowercase : List[str] = pipeline(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
__lowercase : List[Any] = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 715 |
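The replicate/shard/split bookkeeping these tests repeat is generic jax.pmap plumbing; a minimal sketch with a toy function (the device count depends on the host, and the function is an illustrative stand-in for a pipeline call):

import jax
import numpy as np

n_dev = jax.device_count()
batch = np.arange(n_dev * 2, dtype=np.float32).reshape(n_dev * 2, 1)
sharded = batch.reshape(n_dev, -1, *batch.shape[1:])   # what `shard` does
rngs = jax.random.split(jax.random.PRNGKey(0), n_dev)  # one rng per device
out = jax.pmap(lambda x, rng: x * 2)(sharded, rngs)    # leading axis maps to devices
print(out.shape)  # (n_dev, 2, 1)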
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=2 , UpperCamelCase_=56 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=2 , UpperCamelCase_=7 , UpperCamelCase_="gelu_new" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=4 , UpperCamelCase_="block_sparse" , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=2 , UpperCamelCase_=3 , ) -> Any:
__lowercase : Optional[Any] = parent
__lowercase : Optional[Any] = batch_size
__lowercase : Optional[int] = seq_length
__lowercase : Any = is_training
__lowercase : int = use_attention_mask
__lowercase : List[Any] = use_token_type_ids
__lowercase : Union[str, Any] = use_labels
__lowercase : Tuple = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : Optional[int] = hidden_act
__lowercase : Optional[int] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : Union[str, Any] = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : int = type_sequence_label_size
__lowercase : Dict = initializer_range
__lowercase : Union[str, Any] = num_choices
__lowercase : Dict = rescale_embeddings
__lowercase : int = attention_type
__lowercase : Tuple = use_bias
__lowercase : Tuple = block_size
__lowercase : Dict = num_random_blocks
def _lowerCamelCase ( self ) -> Any:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Tuple = None
if self.use_attention_mask:
__lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : List[str] = None
if self.use_token_type_ids:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : Dict = self.prepare_config_and_inputs()
__lowercase ,__lowercase ,__lowercase ,__lowercase : int = config_and_inputs
__lowercase : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( snake_case , unittest.TestCase ):
UpperCamelCase =(
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCamelCase =False
UpperCamelCase =False
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : List[Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self ) -> List[str]:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self ) -> Union[str, Any]:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self ) -> Any:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self ) -> str:
super().test_hidden_states_output()
@slow
def _lowerCamelCase ( self ) -> Any:
for model_class_name in self.all_model_classes:
__lowercase : Optional[Any] = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(UpperCamelCase_ )
def _lowerCamelCase ( self ) -> str:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase ,__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase : Union[str, Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : Union[str, Any] = model_class(UpperCamelCase_ )
@jax.jit
def model_jitted(UpperCamelCase_ , UpperCamelCase_=None , **UpperCamelCase_ ):
return model(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
__lowercase : Dict = model_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__lowercase : Optional[Any] = model_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1E-5 , UpperCamelCase_="outputs" , UpperCamelCase_=None ) -> Optional[int]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
| 523 | 0 |
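The jitted-versus-eager equivalence check at the end of the test class generalizes to any pure function; a compact sketch of the same pattern:

import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return jnp.tanh(x) * 2

x = jnp.ones((2, 3))
with jax.disable_jit():
    eager = f(x)   # runs op by op
compiled = f(x)    # runs the XLA-compiled version
assert jnp.allclose(eager, compiled)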
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54 |
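The core of the script is just counting non-pad tokens per row; on a toy batch (pad id 0 is an assumption, real tokenizers vary):

import torch

pad = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 0, 0, 0, 0]])
print(input_ids.ne(pad).sum(1).tolist())  # [3, 1]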
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 437 | 0 |
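Round-tripping a name through the converters shows the intended behavior (assuming the module above is importable):

assert camelcase_to_snakecase("SomeTestDataset") == "some_test_dataset"
assert snakecase_to_camelcase("some_test_dataset") == "SomeTestDataset"
assert filename_prefix_for_split("SomeTestDataset", "train") == "some_test_dataset-train"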
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 703 |
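prepare_image_inputs builds batches of random images; an equivalent single-image fixture is easy to write by hand (sizes assumed for illustration):

import numpy as np
from PIL import Image

def random_pil_image(height=30, width=30):
    arr = np.random.randint(0, 256, (height, width, 3), dtype=np.uint8)
    return Image.fromarray(arr)

img = random_pil_image()
print(img.size)  # (30, 30)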
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250 | 0 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at start, recording vertices with even subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 320 |
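For this input the printed answer is 2: the root's component (all 10 vertices, an even count) always lands in cuts but has no parent edge to remove, hence len(cuts) - 1. A standalone check:

from collections import defaultdict

def count_even_cuts(edges):
    tree = defaultdict(list)
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    visited, cuts = {}, []

    def dfs(start):
        visited[start] = True
        ret = 1
        for v in tree[start]:
            if v not in visited:
                ret += dfs(v)
        if ret % 2 == 0:
            cuts.append(start)
        return ret

    dfs(1)
    return len(cuts) - 1  # the root's even component is not a removable edge

edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
print(count_even_cuts(edges))  # 2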
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 320 | 1 |
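The multipliers encode digit concatenation: a 4-digit base b concatenated with 2b is b * 10**5 + 2b = 100002 * b, and a 3-digit base concatenated with 2b and 3b is 1002003 * b. The statement's example checks out:

assert int("".join(str(192 * i) for i in (1, 2, 3))) == 192_384_576
assert 1_002_003 * 192 == 192_384_576
assert is_9_pandigital(192_384_576)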
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the bit string using a Lempel-Ziv scheme and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file length (in length-prefixed binary) to the data."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a 1-then-zeros end marker) and write it."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]
            # We will have an empty or partial final chunk; mark the true end of the data.
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
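# Added usage note (paths are placeholders, not from the original row):
#     python lempel_ziv.py <source_file> <destination_file>
# The source is read as a bit string, compressed with the lexicon above,
# prefixed with its length (Elias gamma coded) and written back out as bytes.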
| 513 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case =logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self, image: np.ndarray, size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None, do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
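# Added usage sketch (the image path is a placeholder; with the defaults above
# every image is resized to 384x384 and returned channel-first):
# from PIL import Image
# processor = BlipImageProcessor()
# batch = processor.preprocess(Image.open("photo.jpg"), return_tensors="np")
# batch["pixel_values"].shape  # expected: (1, 3, 384, 384)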
| 513 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to ``isqrt(number)``; candidates here are always >= 7."""
    return number > 1 and all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))
def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` that are a difference of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
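# Why the stride works (added note): the difference of consecutive cubes is
# (n + 1)**3 - n**3 == 3*n*n + 3*n + 1, and consecutive such values differ by
# 6 * (n + 1); starting at 7 (n = 1) this reproduces 7, 19, 37, 61, ...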
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)[input_name]
        input_2 = feat_extract.pad(processed_features, padding="longest")[input_name]
        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))[
            input_name
        ]
        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_4[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))
        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)[input_name]
        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)[input_name]
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )[input_name]
        input_9 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length,
            return_tensors="np",
        )[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )[input_name]
        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))[
            input_name
        ]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np",
            truncation=True,
        )[input_name]
        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True,
            return_tensors="np",
        )[input_name]
        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )[input_name]
        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of, truncation=True,
        )[input_name]
        input_9 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
| 380 | 0 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 1."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 0."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Flip the bit at ``position`` of ``number``."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` of ``number`` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at ``position`` of ``number``."""
    return int((number & (1 << position)) != 0)
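# Worked example (added; values illustrative): with number = 0b1010,
# set_bit(0b1010, 0) -> 0b1011, clear_bit(0b1010, 1) -> 0b1000,
# flip_bit(0b1010, 3) -> 0b0010, is_bit_set(0b1010, 1) -> True,
# get_bit(0b1010, 0) -> 0.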
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 202 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""", """False""" ) ) is not True, reason="""Skipping test because should only be run when releasing minor transformers version""", )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role,
            image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count,
            instance_type=self.instance_type, debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 630 |
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Entangle ``qubits`` qubits into a GHZ state and return the measured counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
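# Added note: the H gate plus the CNOT chain prepares a GHZ state, so ideally
# only the all-zeros and all-ones bitstrings appear in the counts; for three
# qubits that is roughly {"000": ~500, "111": ~500} (exact numbers vary per run).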
if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
| 630 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
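# Added note: with this lazy structure, `from transformers.models.xmod import
# XmodModel` only imports the torch-dependent module on first attribute access;
# the explicit imports in the TYPE_CHECKING branch serve static analysis only.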
| 708 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
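# Added sanity check: generate_pascal_triangle(3) and
# generate_pascal_triangle_optimized(3) both return [[1], [1, 1], [1, 2, 1]].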
| 456 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320,
        max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2,
        contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256,
        proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean",
        ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3,
        output_hidden_size=None, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
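# Added usage sketch: `inputs_to_logits_ratio` is the product of the conv
# strides above, i.e. 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320 input samples per
# encoder frame.
# config = WavLMConfig()
# assert config.inputs_to_logits_ratio == 320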
| 144 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    """Sum all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
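# Worked example (added): 4150 == 4**5 + 1**5 + 5**5 + 0**5
# (1024 + 1 + 3125 + 0), so 4150 is one of the numbers summed above.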
if __name__ == "__main__":
print(solution())
| 661 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
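

# Illustration only (not part of the original script): how the `pad_to_multiple_of`
# values chosen in `collate_fn` change the padded batch length when the longest
# sequence in a batch has 45 tokens.
def _padded_length(longest, pad_to_multiple_of):
    if pad_to_multiple_of is None:
        return longest
    # Round up to the next multiple, mirroring the rounding `tokenizer.pad` applies.
    return ((longest + pad_to_multiple_of - 1) // pad_to_multiple_of) * pad_to_multiple_of


assert _padded_length(45, None) == 45  # fp32: pad to the longest sequence only
assert _padded_length(45, 8) == 48     # fp16/bf16: tensor cores prefer multiples of 8
assert _padded_length(45, 16) == 48    # fp8: multiples of 16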
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Trains and evaluates BERT-base on GLUE MRPC with distributed-aware metric gathering."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
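

# Hedged sketch (not part of the original script): the manual `samples_seen`
# bookkeeping above can be replaced by `Accelerator.gather_for_metrics`, which
# truncates the duplicated samples of the final batch on distributed setups
# automatically. This assumes the same `model`, `eval_dataloader`, `metric`,
# and `accelerator` objects as in `training_function`.
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()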
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 714 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a Flax key/tensor pair to the PyTorch naming scheme."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
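

# Worked illustration (ad hoc, not part of the original script): an expert kernel of
# shape (num_experts, d_model, d_ff) gets its key suffix renamed to "weight" and its
# last two axes swapped so it matches the PyTorch (out, in) layout:
#   rename_base_flax_keys(("mlp", "experts", "expert", "kernel"), torch.ones(8, 512, 2048))
#   -> (("mlp", "experts", "expert", "weight"), tensor of shape (8, 2048, 512))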
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # Switch from the "/"-separated Flax naming to the "."-separated PyTorch one.
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Streams a T5X Switch checkpoint from TensorStore and writes sharded PyTorch weights."""
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
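

# Back-of-the-envelope check (illustrative, not part of the original script):
# `convert_file_size_to_int("10GB")` is 10 * 10**9 bytes and
# `dtype_byte_size(torch.bfloat16)` is 2, so a bfloat16 tensor with one billion
# elements weighs 2 GB and roughly five of them fit in a single 10GB shard.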
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_a : Optional[int] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Round-trip check: convert a small Switch checkpoint and run one generation."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 87 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
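

# Note on conventions (illustrative): Flax `Dense` kernels are stored as
# (in_features, out_features) while PyTorch `nn.Linear` weights are
# (out_features, in_features), which is why every kernel above is transposed with `.T`.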
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 566 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
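
# Worked example (illustrative): with the graphs above, the shortest E -> F path is
# E -> G -> F with total weight 2 + 1 = 3, which is the value checked by the doctest
# in `bidirectional_dij`.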
if __name__ == "__main__":
import doctest
doctest.testmod()
| 476 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
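

# Illustration (ad hoc): with size=(8, 8), overlap_pixels=2 and no removed borders,
# the mask is 8x8 with a 4x4 core at 255 and a 2-pixel linear ramp down to 0 on every
# edge; this alpha ramp is what lets adjacent upscaled tiles blend without seams.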
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_point, max_point):
    return (
        clamp(rect[0], min_point[0], max_point[0]),
        clamp(rect[1], min_point[1], max_point[1]),
        clamp(rect[2], min_point[0], max_point[0]),
        clamp(rect[3], min_point[1], max_point[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
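

# Example (illustrative): next_divisible(10, 4) -> 10 - (10 % 4) -> 8, the largest
# multiple of 4 that does not exceed 10.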
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 617 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
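

# Expected behavior (illustrative): starting from |000>, the Hadamards put the
# register in a uniform superposition, so the 10000 shots split roughly evenly
# (~1250 counts) across the 8 computational basis states.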
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 617 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Constructs a Bark processor which wraps a text tokenizer and optional Bark voice presets."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
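

# Hedged usage sketch (not part of the original file): load a Bark processor with its
# preloaded speaker embeddings and tokenize a prompt with a named voice preset;
# "suno/bark-small" and "v2/en_speaker_6" are the upstream names published on the
# Hugging Face Hub.
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")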
| 83 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 449 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximates a minimum vertex cover via maximal matching (factor-2 approximation)."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Returns a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
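

# Worked example (illustrative): get_edges({0: [1], 1: [0]}) == {(0, 1), (1, 0)}, and
# matching_min_vertex_cover on that graph returns {0, 1}, since popping either edge
# adds both endpoints to the cover and discards the remaining adjacent edge.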
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 284 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt"
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
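    # Informal shape summary, inferred from the assertions above: for hidden states of
    # shape (batch, retrieval_vector_size), retriever.retrieve(hidden_states, n_docs)
    # returns retrieved_doc_embeds of shape (batch, n_docs, retrieval_vector_size),
    # doc_ids of shape (batch, n_docs), and one doc dict per query.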
| 284 | 1 |
"""ViT MSN model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
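# Illustrative sketch (hedged; not part of the original module): with the defaults
# above, a 224x224 image split into 16x16 patches yields (224 // 16) ** 2 = 196
# patch tokens per example:
#   config = ViTMSNConfig()
#   num_patches = (config.image_size // config.patch_size) ** 2  # -> 196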
| 546 |
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
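# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) ≈ 15.708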
if __name__ == "__main__":
    print(arc_length(90, 10))
| 137 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
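# Hypothetical launch command (the script and config file names are placeholders, not taken from this file):
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --output_dir ./results --num_epochs 3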
| 710 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the value at `start` down until the min-heap property holds again.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Bubble a decreased key at `index` up towards the root.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
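# Usage sketch (hedged): for the triangle graph 0-1 (weight 1), 1-2 (weight 2), 0-2 (weight 3):
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[1, 2], [0, 3]]}
#   prisms_algorithm(adjacency_list)  # -> [(0, 1), (1, 2)], the MST of total weight 3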
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 78 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_canine"] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 635 |
import numpy as np
import datasets
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
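# Hedged standalone sketch (not part of the original metric): the same computation
# without the `datasets` wrapper, reproducing the docstring example above.
def _mahalanobis_sketch(X, reference_distribution):
    X = np.array(X)
    reference_distribution = np.array(reference_distribution)
    delta = X - np.mean(reference_distribution)
    inv_covmat = np.linalg.pinv(np.cov(reference_distribution.T))  # pinv also covers singular covariances
    return np.dot(np.dot(delta, inv_covmat), delta.T).diagonal()


# _mahalanobis_sketch([[0, 1]], [[0, 1], [1, 0]]) -> array([0.5])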
| 635 | 1 |
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)
if __name__ == "__main__":
    main()
| 710 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 394 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
bort_4_8_768_1024_hparams = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1_0_2_4,
"""hidden_size""": 7_6_8,
"""max_length""": 5_1_2,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1_0_2_4,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
encoder = BERTEncoder(
attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None))
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_UpperCAmelCase = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
gluon_cache_dir = os.path.join(get_home_dir(), "models")
vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
original_bort = nlp.model.BERTModel(
encoder, len(vocab), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=False, use_decoder=False)
original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
params = original_bort._collect_params_with_prefix()
# Build our config 🤗
hf_bort_config_json = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(_A ),
}
hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(mx_array) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
def check_and_map_params(hf_param, gluon_param):
shape_hf = hf_param.shape
gluon_param = to_torch(params[gluon_param])
shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight")
hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight")
hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta")
hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma")
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
for i in range(hf_bort_config.num_hidden_layers ):
layer = hf_bort_model.bert.encoder.layer[i]
# self attention
self_attn = layer.attention.self
self_attn.key.bias.data = check_and_map_params(
self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias")
self_attn.key.weight.data = check_and_map_params(
self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight")
self_attn.query.bias.data = check_and_map_params(
self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias")
self_attn.query.weight.data = check_and_map_params(
self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight")
self_attn.value.bias.data = check_and_map_params(
self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias")
self_attn.value.weight.data = check_and_map_params(
self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight")
# self attention output
self_output = layer.attention.output
self_output.dense.bias = check_and_map_params(
self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias")
self_output.dense.weight = check_and_map_params(
self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight")
self_output.LayerNorm.bias = check_and_map_params(
self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta")
self_output.LayerNorm.weight = check_and_map_params(
self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma")
# intermediate
intermediate = layer.intermediate
intermediate.dense.bias = check_and_map_params(
intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias")
intermediate.dense.weight = check_and_map_params(
intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight")
# output
bert_output = layer.output
bert_output.dense.bias = check_and_map_params(
bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias")
bert_output.dense.weight = check_and_map_params(
bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight")
bert_output.LayerNorm.bias = check_and_map_params(
bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta")
bert_output.LayerNorm.weight = check_and_map_params(
bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma")
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
# Get gluon output
mx_input_ids = mx.nd.array([input_ids])
output_gluon = original_bort(inputs=mx_input_ids, token_types=[])
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path)
hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
hf_bort_model.eval()
input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
output_hf = hf_bort_model(**input_ids)[0]
gluon_layer = output_gluon[0].asnumpy()
hf_layer = output_hf[0].detach().numpy()
max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" , _A )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
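# Hypothetical invocation (the script name and paths are placeholders, not taken from this file):
#   python convert_bort_checkpoint.py --bort_checkpoint_path ./bort.params --pytorch_dump_folder_path ./bort-pt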
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 555 |
"""simple docstring"""
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
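# Why logarithms work here: a**x > b**y exactly when x * log10(a) > y * log10(b),
# so comparing x * log10(a) avoids evaluating the astronomically large powers.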
if __name__ == "__main__":
    print(solution())
| 555 | 1 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
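# Worked example: malus_law(100, 60) = 100 * cos(60 degrees) ** 2 = 100 * 0.25 = 25.0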
if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138 | 0 |
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
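# Example from the problem statement: "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53 and,
# as the 938th name in sorted order, contributes 938 * 53 = 49714 to the total.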
if __name__ == "__main__":
    print(solution())
| 207 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _large_tokenizer(self):
return PegasusTokenizer.from_pretrained("google/pegasus-large")
def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
return ("This is a test", "This is a test")
def test_convert_token_and_id(self):
token = "</s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<pad>")
self.assertEqual(vocab_keys[1], "</s>")
self.assertEqual(vocab_keys[-1], "v")
self.assertEqual(len(vocab_keys), 1103)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 1103)
def test_mask_tokens_rust_pegasus(self):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
raw_input_str = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
self.assertListEqual(py_ids, rust_ids)
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase__ = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
UpperCAmelCase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
UpperCAmelCase__ = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase__ = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
UpperCAmelCase__ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = ["""This is going to be way too long.""" * 1_50, """short example"""]
UpperCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
UpperCAmelCase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = {"""input_ids""": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PegasusTokenizer
rust_tokenizer_class = PegasusTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _large_tokenizer(self):
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

def get_input_output_texts(self, tokenizer):
return ("This is a test", "This is a test")
def test_mask_tokens_rust_pegasus(self):
rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
raw_input_str = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
self.assertListEqual(py_ids, rust_ids)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = ["""This is going to be way too long.""" * 10_00, """short example"""]
UpperCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase__ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
UpperCAmelCase__ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase__ = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 603 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(1.702 * vector)
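# Note: vector * sigmoid(1.702 * vector) is the sigmoid approximation of GELU;
# the exact definition is vector * Phi(vector), where Phi is the standard normal CDF.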
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 230 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
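# Both inputs are L2-normalized row-wise, so the matrix product holds pairwise cosine
# similarities: entry (i, j) compares image embedding i with concept embedding j.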
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = CLIPConfig
__snake_case = ["""CLIPEncoderLayer"""]
def __init__( self: List[Any] , a: CLIPConfig ):
super().__init__(a )
__lowerCamelCase : List[str] = CLIPVisionModel(config.vision_config )
__lowerCamelCase : Union[str, Any] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=a )
__lowerCamelCase : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=a )
__lowerCamelCase : List[str] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=a )
__lowerCamelCase : Any = nn.Parameter(torch.ones(17 ) , requires_grad=a )
__lowerCamelCase : Any = nn.Parameter(torch.ones(3 ) , requires_grad=a )
@torch.no_grad()
def _snake_case ( self: Any , a: List[Any] , a: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = self.vision_model(a )[1] # pooled_output
__lowerCamelCase : Dict = self.visual_projection(a )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowerCamelCase : int = cosine_distance(a , self.special_care_embeds ).cpu().float().numpy()
__lowerCamelCase : Optional[int] = cosine_distance(a , self.concept_embeds ).cpu().float().numpy()
__lowerCamelCase : List[str] = []
__lowerCamelCase : Tuple = image_embeds.shape[0]
for i in range(a ):
__lowerCamelCase : int = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
__lowerCamelCase : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
__lowerCamelCase : List[Any] = special_cos_dist[i][concept_idx]
__lowerCamelCase : str = self.special_care_embeds_weights[concept_idx].item()
__lowerCamelCase : Tuple = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
__lowerCamelCase : Optional[Any] = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
__lowerCamelCase : Optional[Any] = cos_dist[i][concept_idx]
__lowerCamelCase : Union[str, Any] = self.concept_embeds_weights[concept_idx].item()
__lowerCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(a )
result.append(a )
__lowerCamelCase : Tuple = [len(res['bad_concepts'] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _snake_case ( self: str , a: torch.FloatTensor , a: torch.FloatTensor ):
__lowerCamelCase : Optional[int] = self.vision_model(a )[1] # pooled_output
__lowerCamelCase : str = self.visual_projection(a )
__lowerCamelCase : str = cosine_distance(a , self.special_care_embeds )
__lowerCamelCase : Dict = cosine_distance(a , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
__lowerCamelCase : List[str] = 0.0
__lowerCamelCase : Tuple = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
__lowerCamelCase : int = torch.any(special_scores > 0 , dim=1 )
__lowerCamelCase : List[str] = special_care * 0.0_1
__lowerCamelCase : str = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
__lowerCamelCase : Dict = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
__lowerCamelCase : Optional[Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
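# Usage sketch (not from the original file): `forward` builds per-image Python
# dicts with rounded scores, while the tensor-only `forward_onnx` path stays
# traceable for export. With the public checkpoint name below (an assumption
# about deployment, not something this file asserts), a direct call looks like:
#
#   from transformers import CLIPImageProcessor
#   checker = StableDiffusionSafetyChecker.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker"
#   )
#   processor = CLIPImageProcessor.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker"
#   )
#   clip_input = processor(images=pil_images, return_tensors="pt").pixel_values
#   images, has_nsfw = checker(clip_input, np_images)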
| 230 | 1 |
"""simple docstring"""
from __future__ import annotations
lowercase__ = 8.988e9 # units = N * m^s * C^-2
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
__a : Union[str, Any] = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
__a : Union[str, Any] = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
__a : Dict = abs(_lowerCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
__a : int = abs(_lowerCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
__a : Union[str, Any] = (COULOMBS_CONSTANT * charge_product / abs(_lowerCamelCase )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
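    # Added worked example: with exactly one argument zero, the function solves
    # for that quantity; two 1 C charges 1 m apart give k * 1 * 1 / 1**2 N.
    print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))
    # -> {'force': 8988000000.0}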
| 581 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowercase__ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
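    # Example invocation (the script file name is assumed; only the default
    # checkpoint name above is taken from this file):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224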
| 581 | 1 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
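# How the tester mixin consumes the two hooks above (sketch of assumed mixin
# internals, not code from this file):
#   components = self.get_dummy_components()
#   pipe = self.pipeline_class(**components).to(torch_device)
#   inputs = self.get_dummy_inputs(torch_device)
#   image = pipe(**inputs).images[0]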
| 149 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    # Flag names assumed (standard FlaxModelTesterMixin switches); the
    # original attribute names were lost.
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
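# The jit test above reduces to this equivalence pattern (sketch; `model` and
# `pixel_values` stand in for objects built by the tester):
#   eager = model(pixel_values=pixel_values).to_tuple()
#   jitted = jax.jit(lambda x: model(pixel_values=x))(pixel_values).to_tuple()
#   assert all(j.shape == e.shape for j, e in zip(jitted, eager))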
| 149 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_determinstic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    pred = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
    tgt = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline() -> None:
    pred = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
    tgt = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, defaultdict)
| 154 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 154 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
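# Note (added): with the `_LazyModule` below, `import transformers.models.mt5`
# stays cheap; framework-specific classes materialize on first attribute
# access, and the tokenizer aliases travel via `extra_objects`, e.g.:
#   from transformers.models.mt5 import MT5Tokenizer  # alias of T5Tokenizer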
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 381 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''UNwant\u00E9d,running'''
_UpperCamelCase = tokenizer.tokenize(__a)
_UpperCamelCase = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a)
_UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(__a)
_UpperCamelCase = rust_tokenizer.encode(__a)
self.assertListEqual(__a , __a)
# With lower casing
_UpperCamelCase = self.get_tokenizer(do_lower_case=__a)
_UpperCamelCase = self.get_rust_tokenizer(do_lower_case=__a)
_UpperCamelCase = '''UNwant\u00E9d,running'''
_UpperCamelCase = tokenizer.tokenize(__a)
_UpperCamelCase = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a)
_UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a)
self.assertListEqual(__a , __a)
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(__a)
_UpperCamelCase = rust_tokenizer.encode(__a)
self.assertListEqual(__a , __a)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''h\u00E9llo'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello'''])
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer(do_lower_case=__a , never_split=['''[UNK]'''])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BasicTokenizer()
_UpperCamelCase = '''a\n\'ll !!to?\'d of, can\'t.'''
_UpperCamelCase = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(__a) , __a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_UpperCamelCase = {}
for i, token in enumerate(__a):
_UpperCamelCase = i
_UpperCamelCase = WordpieceTokenizer(vocab=__a , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') , ['''[UNK]''', '''runn''', '''##ing'''])
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
self.assertListEqual(
[rust_tokenizer.tokenize(__a) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a)
_UpperCamelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
_UpperCamelCase = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_UpperCamelCase = tokenizer_r.do_lower_case if hasattr(__a , '''do_lower_case''') else False
_UpperCamelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''])
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ['''的''', '''人''', '''有''']
_UpperCamelCase = ''''''.join(__a)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
_UpperCamelCase = True
_UpperCamelCase = self.tokenizer_class.from_pretrained(__a , **__a)
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a)
_UpperCamelCase = tokenizer_p.encode(__a , add_special_tokens=__a)
_UpperCamelCase = tokenizer_r.encode(__a , add_special_tokens=__a)
_UpperCamelCase = tokenizer_r.convert_ids_to_tokens(__a)
_UpperCamelCase = tokenizer_p.convert_ids_to_tokens(__a)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a)
self.assertListEqual(__a , __a)
_UpperCamelCase = False
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a)
_UpperCamelCase = self.tokenizer_class.from_pretrained(__a , **__a)
_UpperCamelCase = tokenizer_r.encode(__a , add_special_tokens=__a)
_UpperCamelCase = tokenizer_p.encode(__a , add_special_tokens=__a)
_UpperCamelCase = tokenizer_r.convert_ids_to_tokens(__a)
_UpperCamelCase = tokenizer_p.convert_ids_to_tokens(__a)
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCamelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a)
]
self.assertListEqual(__a , __a)
self.assertListEqual(__a , __a)
| 19 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
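# Example (added) of one helper re-exported above: `find_executable_batch_size`
# retries its decorated function with a halved batch size whenever it hits a
# CUDA out-of-memory error, so training code can start optimistic:
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # an OOM here restarts train() with batch_size // 2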
| 20 | 0 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
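    # Usage (added; the file name and utils/ location are assumed):
    #   python utils/sort_auto_mappings.py --check_only   # report unsorted files
    #   python utils/sort_auto_mappings.py                # rewrite them in place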
| 720 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self : str ):
        """simple docstring"""
        threshold = 0.9_9_8_5
        model_id = '''facebook/detr-resnet-50'''
        object_detector = pipeline('''object-detection''' , model=model_id )
        outputs = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self : Dict ):
        """simple docstring"""
        model_id = '''Narsil/layoutlmv3-finetuned-funsd'''
        threshold = 0.9_9_9_3
        object_detector = pipeline('''object-detection''' , model=model_id , threshold=threshold )
        outputs = object_detector(
            '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
] , )
| 388 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase: List[str] = logging.get_logger(__name__)
lowerCAmelCase: Union[str, Any] = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class LiltConfig( PretrainedConfig ):
    model_type = """lilt"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=10_24 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings | 526 |
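# A minimal usage sketch for the configuration class reconstructed above; it
# mirrors `transformers.LiltConfig`, and the overrides below are illustrative
# values, not tuned recommendations.
config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
print(config.model_type)  # -> "lilt"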
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
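# Typical invocations (the flags are defined by the argparse block at the bottom of this file):
#   python utils/update_metadata.py --token <HF_TOKEN> --commit_sha <SHA>
#   python utils/update_metadata.py --check-only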
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( identifier ):
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
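# Illustration of the split performed above:
#   camel_case_split("TFBertForSequenceClassification")
#   -> ['TF', 'Bert', 'For', 'Sequence', 'Classification']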
def get_frameworks_table( ):
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's look through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's find the right processing class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table ):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF{auto_class}""", f"""Flax{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( token , commit_sha ):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , 'frameworks.json' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , 'pipeline_tags.json' ) )
        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata' , folder_path=tmp_dir , repo_type='dataset' , token=token , commit_message=commit_message , )
def check_pipeline_tags( ):
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ', '.join(missing )
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha) | 526 | 1 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
"""simple docstring"""
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
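        # Policy for the checks below: close an issue 7+ days after the stale-bot
        # notice, un-stale it as soon as a human replies, and post the notice after
        # 23+ days of inactivity; issues younger than 30 days or carrying an exempt
        # label are always skipped.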
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 720 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("""PROCESS_TRAIN""", """false""")
CATEGORY_MAPPING = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def _lowerCamelCase ( UpperCAmelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
def choose_first(UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : str=False ):
assert isinstance(UpperCAmelCase_, UpperCAmelCase_ )
if len(UpperCAmelCase_ ) == 1:
A__ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
A__ = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
A__ = {"id": example["id"]}
A__ = example["annotations"]
A__ = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
A__ = ["yes"] if 1 in yes_no_answer else ["no"]
A__ = A__ = []
A__ = A__ = []
A__ = ["<cls>"]
else:
A__ = ["short"]
A__ = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
A__ = ["long"]
A__ = choose_first(annotation["long_answer"], is_long_answer=UpperCAmelCase_ )
A__ = []
answer.update(UpperCAmelCase_ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
A__ = True
else:
A__ = False
A__ = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k], UpperCAmelCase_ ) for k in cols ):
raise ValueError("Issue in ID", example["id"] )
return answer
def _lowerCamelCase ( UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Any=False ) -> Optional[Any]:
"""simple docstring"""
A__ = _get_single_answer(UpperCAmelCase_ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
A__ = example["document"]["tokens"]
A__ = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(UpperCAmelCase_ ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
A__ = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
A__ = example["document"]["tokens"]
A__ = answer["start_token"]
A__ = answer["end_token"]
A__ = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
A__ = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
A__ = doc["is_html"][answer["start_token"] : answer["end_token"]]
A__ = doc["token"][answer["start_token"] : answer["end_token"]]
A__ = " ".join([old[i] for i in range(len(UpperCAmelCase_ ) ) if not is_html[i]] )
if new != old:
print("ID:", example["id"] )
print("New:", UpperCAmelCase_, end="\n" )
print("Old:", UpperCAmelCase_, end="\n\n" )
return {
"context": " ".join(UpperCAmelCase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def _lowerCamelCase ( UpperCAmelCase_ : List[str], UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : List[str]=2048, UpperCAmelCase_ : Union[str, Any]=4096, UpperCAmelCase_ : Optional[int]=True ) -> str:
"""simple docstring"""
A__ = get_context_and_ans(UpperCAmelCase_, assertion=UpperCAmelCase_ )
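    # When the tokenized question + context exceeds `max_length`, the context is
    # split into overlapping windows that advance by (max_length - doc_stride)
    # tokens; the answer span is re-indexed inside each window, or labelled -100
    # (the cross-entropy ignore index) when it falls outside the window.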
A__ = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
A__ = tokenizer(example["question"]["text"], out["context"] ).input_ids
A__ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
A__ = []
A__ = []
A__ = input_ids[:q_len]
A__ = range(UpperCAmelCase_, len(UpperCAmelCase_ ), max_length - doc_stride )
for i in doc_start_indices:
A__ = i + max_length - q_len
A__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(UpperCAmelCase_ ),
"end_token": [-100] * len(UpperCAmelCase_ ),
"category": category,
},
}
A__ = out["context"].split()
A__ = splitted_context[answer["end_token"]]
A__ = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ), add_special_tokens=UpperCAmelCase_, ).input_ids )
A__ = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ), add_special_tokens=UpperCAmelCase_ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
A__ = len(tokenizer(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
A__ = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
A__ = answer["start_token"]
A__ = answer["end_token"]
if assertion:
A__ = tokenizer.decode(UpperCAmelCase_ )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:", answer["span"] )
print("NEW:", UpperCAmelCase_, end="\n\n" )
if len(UpperCAmelCase_ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
A__ = input_ids[:q_len]
A__ = range(UpperCAmelCase_, len(UpperCAmelCase_ ), max_length - doc_stride )
A__ = []
A__ = []
A__ = []
A__ = [] # null, yes, no, long, short
for i in doc_start_indices:
A__ = i + max_length - q_len
A__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
A__ = start_token - i + q_len
A__ = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
A__ = -100
A__ = -100
answers_category.append("null" )
A__ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(UpperCAmelCase_ )
answers_end_token.append(UpperCAmelCase_ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:", example["id"] )
print("New:", tokenizer.decode(UpperCAmelCase_ ) )
print("Old:", tokenizer.decode(UpperCAmelCase_ ), end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs( example , tokenizer , doc_stride=2048 , max_length=4096 , assertion=False ):
    """simple docstring"""
    example = get_strided_contexts_and_ans(
        example , tokenizer , doc_stride=doc_stride , max_length=max_length , assertion=assertion , )
    return example
def save_to_disk( hf_data , file_name ):
    """simple docstring"""
    with jsonlines.open(file_name , "a" ) as writer:
        for example in tqdm(hf_data , total=len(hf_data ) , desc="Saving samples ... " ):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
                if start == -1 and end == -1:
                    continue  # skip samples that have no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the "null" samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("""natural_questions""")
    tokenizer = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
    data = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
    fn_kwargs = {
        """tokenizer""": tokenizer,
        """doc_stride""": DOC_STRIDE,
        """max_length""": MAX_LENGTH,
        """assertion""": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
    print(data)
    np.random.seed(SEED)
    cache_file_name = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
| 562 | 0 |
"""simple docstring"""
import math
def proth( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers have the form k * 2**n + 1 (odd k < 2**n). Each "block"
        # below extends the list by adding the next power of two to a run of
        # consecutive earlier entries, so `block_index` blocks cover the request.
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
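# Usage note: `proth` is 1-indexed, so proth(1) == 3, proth(2) == 5, proth(3) == 9.
# The driver below iterates number = 0..10; number 0 exercises the ValueError path.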
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
        value = 0
try:
            value = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}') | 156 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self , **kwargs ):
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
    def test_timesteps(self ):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas(self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules(self ):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type(self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction(self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3
    def test_full_loop_no_noise(self ):
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
    def test_full_loop_device(self ):
        '''simple docstring'''
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[str] = 'T5Config'
class TFMTaModel( TFTaModel ):
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration( TFTaForConditionalGeneration ):
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel( TFTaEncoderModel ):
    model_type = "mt5"
    config_class = MTaConfig
| 339 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowercase__ ( __UpperCamelCase : str ):
'''simple docstring'''
    __UpperCamelCase = re.sub("""<n>""" , """""" , __UpperCamelCase )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__UpperCamelCase ) )
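# A minimal usage sketch for the splitter above (assumes nltk is installed; the
# "punkt" model is downloaded at import time when it is available).
if __name__ == "__main__":
    print(lowercase__("First sentence. Second sentence."))
    # -> First sentence.
    #    Second sentence.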
| 339 | 1 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def grid_shortest_path( grid , source , destination , allow_diagonal , ):
    # NOTE: all names in this function were reconstructed from the obfuscated dump.
    # Dijkstra-style search on a 0/1 grid: cells equal to 1 are walkable and each
    # step costs 1; `predecessors` lets us rebuild the path once the goal is reached.
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        dist, (x, y) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
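    # Smoke test on a small hypothetical grid (function and argument names were
    # reconstructed above): 1 = walkable, 0 = blocked.
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    print(grid_shortest_path(demo_grid, (0, 0), (2, 0), False))
    # -> (6.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])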
| 597 | '''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader( AbstractDatasetInputStream ):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] , __lowerCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCAmelCase : Optional[Features] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCAmelCase = Sql(
cache_dir=__lowerCAmelCase , features=__lowerCAmelCase , sql=__lowerCAmelCase , con=__lowerCAmelCase , **__lowerCAmelCase , )
def a ( self : str ):
"""simple docstring"""
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , )
# Build dataset for splits
_lowerCAmelCase = self.builder.as_dataset(
split='train' , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter :
"""simple docstring"""
def __init__( self : Dict , __lowerCAmelCase : Dataset , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Tuple , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
_lowerCAmelCase = dataset
_lowerCAmelCase = name
_lowerCAmelCase = con
_lowerCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_lowerCAmelCase = num_proc
_lowerCAmelCase = to_sql_kwargs
def a ( self : Optional[int] ):
"""simple docstring"""
_lowerCAmelCase = self.to_sql_kwargs.pop('sql' , __lowerCAmelCase )
_lowerCAmelCase = self.to_sql_kwargs.pop('con' , __lowerCAmelCase )
_lowerCAmelCase = self.to_sql_kwargs.pop('index' , __lowerCAmelCase )
_lowerCAmelCase = self._write(index=__lowerCAmelCase , **self.to_sql_kwargs )
return written
def a ( self : Any , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = args
_lowerCAmelCase = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_lowerCAmelCase = query_table(
table=self.dataset.data , key=slice(__lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
_lowerCAmelCase = batch.to_pandas()
_lowerCAmelCase = df.to_sql(self.name , self.con , index=__lowerCAmelCase , **__lowerCAmelCase )
return num_rows or len(__lowerCAmelCase )
def a ( self : Dict , __lowerCAmelCase : List[Any] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCAmelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_lowerCAmelCase , _lowerCAmelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __lowerCAmelCase , __lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
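# A minimal sketch of the public entry points these classes back; it assumes the
# usual `datasets` API (`Dataset.to_sql` / `Dataset.from_sql`), and the table
# name and SQLite URI below are hypothetical.
if __name__ == "__main__":
    from datasets import Dataset
    demo = Dataset.from_dict({"text": ["a", "b"]})
    demo.to_sql("demo_table", "sqlite:///demo.db")  # exercises the writer class above
    roundtrip = Dataset.from_sql("demo_table", "sqlite:///demo.db")  # exercises the reader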
| 309 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_lowerCAmelCase = "CompVis/stable-diffusion-v1-1"
_lowerCAmelCase = "CompVis/stable-diffusion-v1-2"
_lowerCAmelCase = "CompVis/stable-diffusion-v1-3"
_lowerCAmelCase = "CompVis/stable-diffusion-v1-4"
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Dict , _A : AutoencoderKL , _A : CLIPTextModel , _A : CLIPTokenizer , _A : UNetaDConditionModel , _A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _A : StableDiffusionSafetyChecker , _A : CLIPImageProcessor , _A : bool = True , ):
        super().__init__()
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(_A )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(_A )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(_A )
_UpperCamelCase = StableDiffusionPipeline(
vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , safety_checker=_A , feature_extractor=_A , requires_safety_checker=_A , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
    def components( self ):
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
def UpperCamelCase_ ( self : Optional[Any] , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Dict , ):
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def UpperCamelCase_ ( self : Any , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Optional[Any] , ):
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def UpperCamelCase_ ( self : List[str] , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : List[Any] , ):
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def UpperCamelCase_ ( self : Dict , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : Optional[int] , ):
return self.pipea(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
@torch.no_grad()
def UpperCamelCase_ ( self : Dict , _A : Union[str, List[str]] , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : List[Any] , ):
_UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_A )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
_UpperCamelCase = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
        # Get second result from Stable Diffusion Checkpoint v1.2
_UpperCamelCase = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
        # Get third result from Stable Diffusion Checkpoint v1.3
_UpperCamelCase = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
        # Get fourth result from Stable Diffusion Checkpoint v1.4
_UpperCamelCase = self.textaimg_sda_a(
prompt=_A , height=_A , width=_A , num_inference_steps=_A , guidance_scale=_A , negative_prompt=_A , num_images_per_prompt=_A , eta=_A , generator=_A , latents=_A , output_type=_A , return_dict=_A , callback=_A , callback_steps=_A , **_A , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 710 | import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase_ :
@property
def UpperCamelCase_ ( self : Optional[int] ):
return self.get_dummy_input()
@property
def UpperCamelCase_ ( self : Dict ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str]=True , _A : Any=False , _A : Union[str, Any]=False , _A : int=False , ):
_UpperCamelCase = 4
_UpperCamelCase = 32
_UpperCamelCase = (32, 32)
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = (batch_size, num_channels) + sizes
_UpperCamelCase = randn_tensor(_A , generator=_A , device=_A )
_UpperCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
_UpperCamelCase = 128
_UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=_A , device=_A )
if include_res_hidden_states_tuple:
_UpperCamelCase = torch.manual_seed(1 )
_UpperCamelCase = (randn_tensor(_A , generator=_A , device=_A ),)
if include_encoder_hidden_states:
_UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(_A )
if include_skip_sample:
_UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_A , device=_A )
return dummy_input
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
_UpperCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self : Tuple , _A : Union[str, Any] ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
unet_block.to(_A )
unet_block.eval()
with torch.no_grad():
_UpperCamelCase = unet_block(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
_UpperCamelCase = output[0, -1, -3:, -3:]
_UpperCamelCase = torch.tensor(_A ).to(_A )
assert torch_all_close(output_slice.flatten() , _A , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase , _UpperCamelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase = self.block_class(**_A )
model.to(_A )
model.train()
_UpperCamelCase = model(**_A )
if isinstance(_A , _A ):
_UpperCamelCase = output[0]
_UpperCamelCase = torch.device(_A )
_UpperCamelCase = randn_tensor(output.shape , device=_A )
_UpperCamelCase = torch.nn.functional.mse_loss(_A , _A )
loss.backward()
| 71 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path: str , mobilebert_config_file: str , pytorch_dump_path: str ):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
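# Example invocation (paths are hypothetical; the flags are defined by the
# argparse block below):
#   python convert_mobilebert_tf_checkpoint.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin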
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 160 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester :
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches_per_frame + 1 (the CLS token)
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_video_classification(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    # NOTE: the four flag names below are reconstructed; the dump only kept the `False` values.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_video_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs(self ):
        '''simple docstring'''
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs )
                # Check attention is always last and order is fine
                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
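# Hedged usage sketch (illustrative, not part of the test suite above): the
# checkpoint and the mean/std normalisation mirror the integration test; the
# random 8-frame clip is only a stand-in for a real decoded video.
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
_model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")

_clip = list(np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8))
_inputs = _processor(_clip, return_tensors="pt")
with torch.no_grad():
    _logits = _model(**_inputs).logits
print(_model.config.id2label[int(_logits.argmax(-1))])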
| 160 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
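# Hedged usage sketch (illustrative): asking a Dataset for torch format routes
# rows, columns and batches through the TorchFormatter defined above.
from datasets import Dataset

_ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
_ds = _ds.with_format("torch")
print(_ds[0])    # {'x': tensor([1., 2.]), 'y': tensor(0)}
print(_ds["y"])  # tensor([0, 1]) - equal-shape values are consolidated via torch.stack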
| 467 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
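# Hedged usage sketch (assumes the transformers package is installed; the
# defaults above correspond to the published Megatron-BERT architecture):
from transformers import MegatronBertConfig, MegatronBertModel

_config = MegatronBertConfig()       # 24 layers, hidden size 1024, 16 heads
_model = MegatronBertModel(_config)  # randomly initialised weights
print(_config.vocab_size, _config.num_hidden_layers)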
| 467 | 1 |
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
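# Hedged example run with the classic 2x2 demo key (determinant 7, coprime to
# 36); the exact ciphertext depends on the 36-character key_string above.
_hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
_ct = _hc.encrypt("testing hill cipher")
print(_ct)
print(_hc.decrypt(_ct))  # 'TESTINGHILLCIPHERR' - note the block-size padding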
| 66 |
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # assignment added: the bare call was a no-op

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)

    # optional plugboard
    pb = "pictures"

    # rotor choice restored per the upstream reference implementation (assumption)
    rotor_sel = (rotor2, rotor4, rotor8)

    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 66 | 1 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
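# Hedged mini-demo of the identifier regex driving the sort (illustrative):
_demo_blocks = ['        ("bert", "BertModel"),', '        ("albert", "AlbertModel"),']
print(sorted(_demo_blocks, key=lambda x: _re_identifier.search(x).groups()[0]))
# -> the albert entry now precedes the bert entry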
| 720 |
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
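# Hedged agreement check between the two implementations (sqrt(27) ~ 5.196):
assert np.isclose(
    euclidean_distance([1, 2, 3], [4, 5, 6]),
    euclidean_distance_no_np([1, 2, 3], [4, 5, 6]),
)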
| 527 | 0 |
"""simple docstring"""
from itertools import product
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :str = sides_number
snake_case_ :Optional[Any] = max_face_number * dice_number
snake_case_ :Tuple = [0] * (max_total + 1)
snake_case_ :List[str] = 1
snake_case_ :str = range(_A, max_face_number + 1 )
for dice_numbers in product(_A, repeat=_A ):
snake_case_ :List[str] = sum(_A )
totals_frequencies[total] += 1
return totals_frequencies
def A ( ):
"""simple docstring"""
snake_case_ :Any = total_frequency_distribution(
sides_number=4, dice_number=9 )
snake_case_ :Union[str, Any] = total_frequency_distribution(
sides_number=6, dice_number=6 )
snake_case_ :Optional[int] = 0
snake_case_ :Any = 9
snake_case_ :int = 4 * 9
snake_case_ :Dict = 6
for peter_total in range(_A, max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case_ :int = (4**9) * (6**6)
snake_case_ :Dict = peter_wins_count / total_games_number
snake_case_ :str = round(_A, ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
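# Hedged sanity check of the helper above with a single d4-vs-d4 duel:
# P(strictly higher) = (1 - P(tie)) / 2 = (1 - 4/16) / 2 = 0.375.
_freq = total_frequency_distribution(sides_number=4, dice_number=1)
_wins = sum(_freq[a] * sum(_freq[1:a]) for a in range(1, len(_freq)))
assert _wins / (4 * 4) == 0.375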
| 584 |
"""simple docstring"""
from collections import deque
class lowercase:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
a__ = process_name # process name
a__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
a__ = arrival_time
a__ = burst_time # remaining burst time
a__ = 0 # total time of the process wait in ready queue
a__ = 0 # time from arrival time to completion time
class lowercase:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
a__ = number_of_queues
# time slice of queues that round robin algorithm applied
a__ = time_slices
# unfinished process is in this ready_queue
a__ = queue
# current time
a__ = current_time
# finished process is in this sequence queue
a__ = deque()
def lowercase__ ( self ) -> list[str]:
"""simple docstring"""
a__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
a__ = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
a__ = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
a__ = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
return [q.burst_time for q in queue]
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> deque[Process]:
"""simple docstring"""
a__ = deque() # sequence deque of finished process
while len(__SCREAMING_SNAKE_CASE ) != 0:
a__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__SCREAMING_SNAKE_CASE )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
a__ = 0
# set the process's turnaround time because it is finished
a__ = self.current_time - cp.arrival_time
# set the completion time
a__ = self.current_time
# add the process to queue that has finished queue
finished.append(__SCREAMING_SNAKE_CASE )
self.finish_queue.extend(__SCREAMING_SNAKE_CASE ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> tuple[deque[Process], deque[Process]]:
"""simple docstring"""
a__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__SCREAMING_SNAKE_CASE ) ):
a__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__SCREAMING_SNAKE_CASE )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
a__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__SCREAMING_SNAKE_CASE )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
a__ = 0
# set the finish time
a__ = self.current_time
# update the process' turnaround time because it is finished
a__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__SCREAMING_SNAKE_CASE )
self.finish_queue.extend(__SCREAMING_SNAKE_CASE ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowercase__ ( self ) -> deque[Process]:
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
a__ , a__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
a : List[str] = Process('P1', 0, 53)
a : Optional[int] = Process('P2', 0, 17)
a : Union[str, Any] = Process('P3', 0, 68)
a : Optional[int] = Process('P4', 0, 24)
a : Optional[int] = 3
a : Optional[Any] = [17, 25]
a : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
a : Union[str, Any] = Process('P1', 0, 53)
a : Optional[int] = Process('P2', 0, 17)
a : Optional[Any] = Process('P3', 0, 68)
a : str = Process('P4', 0, 24)
a : Optional[Any] = 3
a : Tuple = [17, 25]
a : int = deque([Pa, Pa, Pa, Pa])
a : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
a : Optional[int] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
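# Hedged smoke test with a fresh two-process workload (values arbitrary):
# one round-robin pass with slice 2 leaves both unfinished, then the FCFS
# tail completes them in order.
_demo_queue = deque([Process("A", 0, 5), Process("B", 1, 3)])
_demo_mlfq = MLFQ(number_of_queues=2, time_slices=[2], queue=_demo_queue, current_time=0)
_demo_mlfq.multi_level_feedback_queue()
print(_demo_mlfq.calculate_sequence_of_finish_queue())  # ['A', 'B']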
| 273 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def get_dataloaders(accelerator, batch_size: int = 16):
    # helper name assumed from the accelerate test scripts this file mirrors
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
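# Hedged wiring sketch (illustrative): preparing the MRPC dataloaders above
# with an Accelerator. The RegressionModel merely stands in for a real
# classifier to show accelerator.prepare(); `get_dataloaders` is the name
# assumed in the restoration above.
from accelerate import Accelerator

_accelerator = Accelerator()
_train_dl, _eval_dl = get_dataloaders(_accelerator, batch_size=16)
_model = RegressionModel()
_optimizer = torch.optim.AdamW(_model.parameters(), lr=1e-3)
_model, _optimizer, _train_dl, _eval_dl = _accelerator.prepare(_model, _optimizer, _train_dl, _eval_dl)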
| 167 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__A = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
__lowerCamelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
__lowerCamelCase = Path(self.output_dir ) / 'metrics.json'
__lowerCamelCase = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
__lowerCamelCase = 0
__lowerCamelCase = defaultdict(lowerCamelCase__ )
__lowerCamelCase = self.config.model_type
__lowerCamelCase = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
__lowerCamelCase = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__lowerCamelCase = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
__lowerCamelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__lowerCamelCase = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__lowerCamelCase = get_git_info()['repo_sha']
__lowerCamelCase = hparams.num_workers
__lowerCamelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase__ ):
__lowerCamelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__lowerCamelCase = self.decoder_start_token_id
__lowerCamelCase = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
__lowerCamelCase = False
__lowerCamelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__lowerCamelCase = self.hparams.eval_max_gen_length
else:
__lowerCamelCase = self.model.config.max_length
__lowerCamelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowercase_ ( self , lowerCamelCase__ ) -> Dict[str, List[str]]:
'''simple docstring'''
__lowerCamelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase__ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
__lowerCamelCase = True
return readable_batch
def lowercase_ ( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.model(lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = self.tokenizer.batch_decode(
lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
return lmap(str.strip , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.tokenizer.pad_token_id
__lowerCamelCase , __lowerCamelCase = batch['input_ids'], batch['attention_mask']
__lowerCamelCase = batch['labels']
if isinstance(self.model , lowerCamelCase__ ):
__lowerCamelCase = self.model._shift_right(lowerCamelCase__ )
else:
__lowerCamelCase = shift_tokens_right(lowerCamelCase__ , lowerCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__lowerCamelCase = decoder_input_ids
self.save_readable_batch(lowerCamelCase__ )
__lowerCamelCase = self(lowerCamelCase__ , attention_mask=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ , use_cache=lowerCamelCase__ )
__lowerCamelCase = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__lowerCamelCase = nn.CrossEntropyLoss(ignore_index=lowerCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
__lowerCamelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__lowerCamelCase = nn.functional.log_softmax(lowerCamelCase__ , dim=-1 )
__lowerCamelCase , __lowerCamelCase = label_smoothed_nll_loss(
lowerCamelCase__ , lowerCamelCase__ , self.hparams.label_smoothing , ignore_index=lowerCamelCase__ )
return (loss,)
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self._step(lowerCamelCase__ )
__lowerCamelCase = dict(zip(self.loss_names , lowerCamelCase__ ) )
# tokens per batch
__lowerCamelCase = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
__lowerCamelCase = batch['input_ids'].shape[0]
__lowerCamelCase = batch['input_ids'].eq(self.pad ).sum()
__lowerCamelCase = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return self._generative_step(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
__lowerCamelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__lowerCamelCase = losses['loss']
__lowerCamelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
__lowerCamelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).type_as(lowerCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase__ )
__lowerCamelCase = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
__lowerCamelCase = self.step_count
self.metrics[prefix].append(lowerCamelCase__ ) # callback writes this to self.metrics_save_path
__lowerCamelCase = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return calculate_rouge(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> dict:
'''simple docstring'''
__lowerCamelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__lowerCamelCase = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowerCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__lowerCamelCase = (time.time() - ta) / batch['input_ids'].shape[0]
__lowerCamelCase = self.ids_to_clean_text(lowerCamelCase__ )
__lowerCamelCase = self.ids_to_clean_text(batch['labels'] )
__lowerCamelCase = self._step(lowerCamelCase__ )
__lowerCamelCase = dict(zip(self.loss_names , lowerCamelCase__ ) )
__lowerCamelCase = self.calc_generative_metrics(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = np.mean(lmap(lowerCamelCase__ , lowerCamelCase__ ) )
base_metrics.update(gen_time=lowerCamelCase__ , gen_len=lowerCamelCase__ , preds=lowerCamelCase__ , target=lowerCamelCase__ , **lowerCamelCase__ )
return base_metrics
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
'''simple docstring'''
return self._generative_step(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.validation_epoch_end(lowerCamelCase__ , prefix='test' )
def lowercase_ ( self , lowerCamelCase__ ) -> SeqaSeqDataset:
'''simple docstring'''
__lowerCamelCase = self.n_obs[type_path]
__lowerCamelCase = self.target_lens[type_path]
__lowerCamelCase = self.dataset_class(
self.tokenizer , type_path=lowerCamelCase__ , n_obs=lowerCamelCase__ , max_target_length=lowerCamelCase__ , **self.dataset_kwargs , )
return dataset
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ) -> DataLoader:
'''simple docstring'''
__lowerCamelCase = self.get_dataset(lowerCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__lowerCamelCase = dataset.make_sortish_sampler(lowerCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__lowerCamelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase__ , batch_sampler=lowerCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
def lowercase_ ( self ) -> DataLoader:
'''simple docstring'''
__lowerCamelCase = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase__ )
return dataloader
def lowercase_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def lowercase_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowercase_ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowerCamelCase__ , lowerCamelCase__ )
add_generic_args(lowerCamelCase__ , lowerCamelCase__ )
parser.add_argument(
'--max_source_length' , default=1_024 , type=lowerCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=56 , type=lowerCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=142 , type=lowerCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=142 , type=lowerCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=lowerCamelCase__ )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowerCamelCase__ )
parser.add_argument('--max_tokens_per_batch' , type=lowerCamelCase__ , default=lowerCamelCase__ )
parser.add_argument('--logger_name' , type=lowerCamelCase__ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=lowerCamelCase__ , default=500 , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=lowerCamelCase__ , default='summarization' , required=lowerCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=lowerCamelCase__ , default=0.0 , required=lowerCamelCase__ )
parser.add_argument('--src_lang' , type=lowerCamelCase__ , default='' , required=lowerCamelCase__ )
parser.add_argument('--tgt_lang' , type=lowerCamelCase__ , default='' , required=lowerCamelCase__ )
parser.add_argument('--eval_beams' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ )
parser.add_argument(
'--val_metric' , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=lowerCamelCase__ , default=lowerCamelCase__ , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=lowerCamelCase__ , default=1 , required=lowerCamelCase__ , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
    def __init__(self, hparams, **kwargs):
'''simple docstring'''
        super().__init__(hparams, **kwargs)
__lowerCamelCase = hparams.src_lang
__lowerCamelCase = hparams.tgt_lang
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> dict:
'''simple docstring'''
return calculate_bleu(lowerCamelCase__ , lowerCamelCase__ )
def main(args, model=None) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=UpperCamelCase__ )
check_output_dir(UpperCamelCase__ , expected_items=3 )
if model is None:
if "summarization" in args.task:
__lowerCamelCase = SummarizationModule(UpperCamelCase__ )
else:
__lowerCamelCase = TranslationModule(UpperCamelCase__ )
__lowerCamelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
__lowerCamelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__lowerCamelCase = os.environ.get('WANDB_PROJECT' , UpperCamelCase__ )
__lowerCamelCase = WandbLogger(name=model.output_dir.name , project=UpperCamelCase__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__lowerCamelCase = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
__lowerCamelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__lowerCamelCase = False
__lowerCamelCase = args.val_metric == 'loss'
__lowerCamelCase = generic_train(
UpperCamelCase__ , UpperCamelCase__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , UpperCamelCase__ ) , early_stopping_callback=UpperCamelCase__ , logger=UpperCamelCase__ , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
__lowerCamelCase = ''
__lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=UpperCamelCase__ ) )
if checkpoints:
__lowerCamelCase = checkpoints[-1]
__lowerCamelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__A = argparse.ArgumentParser()
__A = pl.Trainer.add_argparse_args(parser)
__A = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__A = parser.parse_args()
main(args)
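# Hedged invocation sketch mirroring the __main__ wiring above. The data and
# output paths are placeholders, and --model_name_or_path/--do_train are
# assumed to be contributed by the shared add_generic_args helper (not shown
# in this file).
_parser = argparse.ArgumentParser()
_parser = pl.Trainer.add_argparse_args(_parser)
_parser = SummarizationModule.add_model_specific_args(_parser, os.getcwd())
_args = _parser.parse_args(
    ["--model_name_or_path", "t5-small", "--data_dir", "./cnn_dm", "--output_dir", "./out", "--do_train"]
)
main(_args)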
| 167 | 1 |
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    # class name assumed from the diffusers stable-unCLIP pipeline this module
    # mirrors; it holds the mean/std used to (de)normalise CLIP image embeddings.
    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
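# Hedged round-trip check of the scale/unscale pair above:
_norm = StableUnCLIPImageNormalizer(embedding_dim=8)
_x = torch.randn(2, 8)
assert torch.allclose(_norm.unscale(_norm.scale(_x)), _x, atol=1e-6)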
| 5 |
from ...processing_utils import ProcessorMixin
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''SpeechT5FeatureExtractor'''
__UpperCAmelCase = '''SpeechT5Tokenizer'''
def __init__( self , lowercase_ , lowercase_) -> Optional[int]:
super().__init__(lowercase_ , lowercase_)
def __call__( self , *lowercase_ , **lowercase_) -> int:
__snake_case = kwargs.pop('audio' , lowercase_)
__snake_case = kwargs.pop('text' , lowercase_)
__snake_case = kwargs.pop('text_target' , lowercase_)
__snake_case = kwargs.pop('audio_target' , lowercase_)
__snake_case = kwargs.pop('sampling_rate' , lowercase_)
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')
if audio is not None:
__snake_case = self.feature_extractor(lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
elif text is not None:
__snake_case = self.tokenizer(lowercase_ , **lowercase_)
else:
__snake_case = None
if audio_target is not None:
__snake_case = self.feature_extractor(audio_target=lowercase_ , *lowercase_ , sampling_rate=lowercase_ , **lowercase_)
__snake_case = targets['input_values']
elif text_target is not None:
__snake_case = self.tokenizer(lowercase_ , **lowercase_)
__snake_case = targets['input_ids']
else:
__snake_case = None
if inputs is None:
return targets
if targets is not None:
__snake_case = labels
__snake_case = targets.get('attention_mask')
if decoder_attention_mask is not None:
__snake_case = decoder_attention_mask
return inputs
    def pad(self, *args, **kwargs):
        """Pads `input_values`/`input_ids` and, optionally, `labels` into a batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Spectrogram labels: temporarily report `num_mel_bins` as the feature size
                # so the feature extractor pads the label tensors correctly.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
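# Usage sketch (added for illustration; not part of the original module). It assumes the
# standard "microsoft/speecht5_tts" checkpoint, which is downloaded on first use.
if __name__ == "__main__":
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    # Tokenize text inputs; in a TTS setup, `audio_target` would additionally produce `labels`.
    inputs = processor(text="Hello, world!", return_tensors="pt")
    print(inputs["input_ids"].shape)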
| 313 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
'''simple docstring'''
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
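# Follow-up sketch (added): reloading the saved passages and querying the Faiss index with a
# DPR question embedding. Paths mirror the defaults above; the question text is illustrative.
# from datasets import load_from_disk
# from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
# dataset = load_from_disk(os.path.join(rag_example_args.output_dir, "my_knowledge_dataset"))
# dataset.load_faiss_index("embeddings", os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss"))
# q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
# scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_emb, k=5)
# print(retrieved_examples["title"])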
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
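# Usage sketch (added): instantiating the configuration and, assuming the matching modeling
# code is available, a randomly initialized `VanModel` from it.
# from transformers import VanModel
# configuration = VanConfig()
# model = VanModel(configuration)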
| 673 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 21 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
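    # A few extra checks (added): the pattern accepts local, "94"- and "+94"-prefixed numbers,
    # and rejects operator codes outside {70, 71, 72, 74, ..., 78} (73 is deliberately absent).
    assert is_sri_lankan_phone_number("+94702343221")
    assert is_sri_lankan_phone_number("0702343221")
    assert not is_sri_lankan_phone_number("0732343221")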
| 624 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 278 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65_536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65_536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
    # The value-function checkpoint is saved as a raw state dict, not a full module.
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
    value_function()
| 278 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers that violate the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 463 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
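# Usage sketch (added): `clip_input` is a batch of CLIP-preprocessed images and `images` the
# matching decoded numpy images; the checkpoint/subfolder below follow the DeepFloyd IF repos.
# checker = IFSafetyChecker.from_pretrained("DeepFloyd/IF-I-XL-v1.0", subfolder="safety_checker")
# images, nsfw_detected, watermark_detected = checker(clip_input, images)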
| 463 | 1 |
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Recursively count representations of `needed_sum` as sums of distinct powers."""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """Return how many ways `needed_sum` can be expressed as a sum of distinct natural
    numbers each raised to `power`."""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10.")
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
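    # Worked example (added): 13 = 2**2 + 3**2 is the only way to write 13 as a sum of
    # distinct squares, so this prints 1.
    print(solve(13, 2))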
| 245 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a TF 1.0 model checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
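# Example invocation (added for illustration; the flags are the ones registered above):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin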
| 245 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """Placeholder object raised when the `note_seq` backend is not installed."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 13 |
class snake_case__ :
def __init__( self : Any ):
snake_case__ : Optional[Any] = 0
snake_case__ : Tuple = 0
snake_case__ : Any = {}
def UpperCAmelCase__ ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
if vertex not in self.adjacency:
snake_case__ : str = {}
self.num_vertices += 1
def UpperCAmelCase__ ( self : Dict , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
self.add_vertex(_lowerCamelCase )
self.add_vertex(_lowerCamelCase )
if head == tail:
return
snake_case__ : Optional[Any] = weight
snake_case__ : Union[str, Any] = weight
def UpperCAmelCase__ ( self : Optional[Any] ):
snake_case__ : Optional[int] = self.get_edges()
for edge in edges:
snake_case__ , snake_case__ , snake_case__ : str = edge
edges.remove((tail, head, weight) )
for i in range(len(_lowerCamelCase ) ):
snake_case__ : int = list(edges[i] )
edges.sort(key=lambda _lowerCamelCase : e[2] )
for i in range(len(_lowerCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
snake_case__ : Optional[Any] = edges[i][2] + 1
for edge in edges:
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = edge
snake_case__ : Tuple = weight
snake_case__ : str = weight
def __str__( self : Optional[int] ):
snake_case__ : Tuple = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
snake_case__ : str = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip('\n' )
def UpperCAmelCase__ ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def UpperCAmelCase__ ( self : Union[str, Any] ):
return self.adjacency.keys()
@staticmethod
def UpperCAmelCase__ ( _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : int=None ):
snake_case__ : Optional[Any] = Graph()
if vertices is None:
snake_case__ : Union[str, Any] = []
if edges is None:
snake_case__ : int = []
for vertex in vertices:
g.add_vertex(_lowerCamelCase )
for edge in edges:
g.add_edge(*_lowerCamelCase )
return g
class snake_case__ :
def __init__( self : Tuple ):
snake_case__ : Optional[int] = {}
snake_case__ : Union[str, Any] = {}
def __len__( self : Optional[int] ):
return len(self.parent )
def UpperCAmelCase__ ( self : Optional[Any] , _lowerCamelCase : Any ):
if item in self.parent:
return self.find(_lowerCamelCase )
snake_case__ : Tuple = item
snake_case__ : Union[str, Any] = 0
return item
def UpperCAmelCase__ ( self : Optional[int] , _lowerCamelCase : str ):
if item not in self.parent:
return self.make_set(_lowerCamelCase )
if item != self.parent[item]:
snake_case__ : Optional[int] = self.find(self.parent[item] )
return self.parent[item]
def UpperCAmelCase__ ( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
snake_case__ : int = self.find(_lowerCamelCase )
snake_case__ : str = self.find(_lowerCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
snake_case__ : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
snake_case__ : str = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
snake_case__ : List[Any] = roota
return roota
return None
@staticmethod
def UpperCAmelCase__ ( _lowerCamelCase : Optional[Any] ):
snake_case__ : Any = graph.num_vertices
snake_case__ : Optional[Any] = Graph.UnionFind()
snake_case__ : Optional[int] = []
while num_components > 1:
snake_case__ : Any = {}
for vertex in graph.get_vertices():
snake_case__ : Dict = -1
snake_case__ : Tuple = graph.get_edges()
for edge in edges:
snake_case__ , snake_case__ , snake_case__ : Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
snake_case__ , snake_case__ , snake_case__ : Dict = edge
snake_case__ : int = union_find.find(_lowerCamelCase )
snake_case__ : int = union_find.find(_lowerCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case__ : Union[str, Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case__ : Optional[Any] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
snake_case__ , snake_case__ , snake_case__ : Optional[Any] = cheap_edge[vertex]
if union_find.find(_lowerCamelCase ) != union_find.find(_lowerCamelCase ):
union_find.union(_lowerCamelCase , _lowerCamelCase )
mst_edges.append(cheap_edge[vertex] )
snake_case__ : List[Any] = num_components - 1
snake_case__ : Tuple = Graph.build(edges=_lowerCamelCase )
return mst
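# Usage sketch (added): with the obfuscated names restored to the originals (add_vertex,
# add_edge, distinct_weight, boruvka, ...), Boruvka's MST would be driven like this:
# g = Graph.build(vertices=[1, 2, 3, 4], edges=[[1, 2, 1], [2, 3, 2], [3, 4, 1], [4, 1, 3]])
# g.distinct_weight()  # the algorithm assumes pairwise-distinct edge weights
# mst = Graph.boruvka(g)
# print(mst)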
| 170 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Constructs a Bark processor wrapping a text tokenizer and optional speaker embeddings."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **_SCREAMING_SNAKE_CASE )-> Tuple:
if speaker_embeddings_dict_path is not None:
lowerCamelCase_ =get_file_from_repo(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , subfolder=kwargs.pop("""subfolder""" , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , _SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
f'`{os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
lowerCamelCase_ =None
else:
with open(_SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowerCamelCase_ =json.load(_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ =None
lowerCamelCase_ =AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return cls(tokenizer=_SCREAMING_SNAKE_CASE , speaker_embeddings=_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , _SCREAMING_SNAKE_CASE="speaker_embeddings" , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , )-> int:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """v2""" ) , exist_ok=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ={}
lowerCamelCase_ =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCamelCase_ =self._load_voice_preset(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , _SCREAMING_SNAKE_CASE , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =os.path.join(_SCREAMING_SNAKE_CASE , f'{prompt_key}_{key}.npy' )
lowerCamelCase_ =tmp_dict
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
super().save_pretrained(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE )-> List[Any]:
lowerCamelCase_ =self.speaker_embeddings[voice_preset]
lowerCamelCase_ ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
lowerCamelCase_ =get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("""cache_dir""" , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("""force_download""" , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("""proxies""" , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("""resume_download""" , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("""local_files_only""" , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("""use_auth_token""" , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("""revision""" , _SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
lowerCamelCase_ =np.load(_SCREAMING_SNAKE_CASE )
return voice_preset_dict
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None )-> List[Any]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="pt" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , )-> Tuple:
if voice_preset is not None and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCamelCase_ =self._load_voice_preset(_SCREAMING_SNAKE_CASE )
else:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not voice_preset.endswith(""".npz""" ):
lowerCamelCase_ =voice_preset + """.npz"""
lowerCamelCase_ =np.load(_SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.tokenizer(
_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowerCamelCase_ =voice_preset
return encoded_text
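# Usage sketch (added): loading the processor together with its speaker-embedding presets;
# "suno/bark" is the reference checkpoint and "v2/en_speaker_6" one of its voice presets.
# processor = BarkProcessor.from_pretrained("suno/bark")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")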
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
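# Note (added): with the `_LazyModule` replacement above, submodules are only imported on
# first attribute access, e.g. `transformers.models.reformer.ReformerConfig` triggers the
# import of `configuration_reformer` at that point, keeping the initial import cheap.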
| 75 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
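# Usage sketch (added): loading the fast tokenizer from one of the checkpoints mapped above.
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# print(tokenizer("Hello world")["input_ids"])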
| 42 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 456 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
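# Usage sketch (added): building a config with the default ResNet backbone and, assuming the
# modeling code is available, an `UperNetForSemanticSegmentation` model from it.
# from transformers import UperNetForSemanticSegmentation
# configuration = UperNetConfig()
# model = UperNetForSemanticSegmentation(configuration)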
| 710 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
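# Usage sketch (added): a test can consume the fixture chain; the directory returned by
# `dataset_loading_script_dir` contains a `<script_name>.py` file with the builder code.
# def test_dummy_dataset_script(dataset_loading_script_dir):
#     import os
#     assert os.path.isfile(os.path.join(dataset_loading_script_dir, "__dummy_dataset1__.py"))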
| 354 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = 'facebook/nllb-200-distilled-600M'
__snake_case :Optional[Any] = (
        'This is a tool that translates text from one language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, '
        'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\' or \'Albanian\'. It returns the text translated into `tgt_lang`.'
)
__snake_case :str = 'translator'
__snake_case :Optional[Any] = AutoTokenizer
__snake_case :Optional[Any] = AutoModelForSeqaSeqLM
__snake_case :Dict = LANGUAGE_CODES
__snake_case :Any = ['text', 'text', 'text']
__snake_case :Dict = ['text']
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ) -> str:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
__lowercase = self.lang_to_code[src_lang]
__lowercase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_lowerCAmelCase , return_tensors="""pt""" , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase )
def _a ( self : Tuple , _lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.model.generate(**_lowerCAmelCase )
def _a ( self : List[Any] , _lowerCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_lowerCAmelCase )
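# A minimal usage sketch for the tool above (hedged: `TranslationTool` stands in
# for the mangled class name, and encode / forward / decode are the conventional
# Tool-API names behind the three mangled methods — treat this as an illustration,
# not a verified call signature):
#
#     tool = TranslationTool()
#     encoded = tool.encode("Hello, how are you?", src_lang="English", tgt_lang="French")
#     generated = tool.forward(encoded)
#     print(tool.decode(generated))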
| 80 |
import heapq
import sys
import numpy as np
_lowerCamelCase : Any = tuple[int, int]
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Any) ->str:
'''simple docstring'''
A__ = []
A__ = set()
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''')
def SCREAMING_SNAKE_CASE ( self : Tuple) ->str:
'''simple docstring'''
return len(self.elements) == 0
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]) ->List[str]:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(UpperCAmelCase__)
else:
# update
# print("update", item)
A__ = []
((A__) , (A__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((A__) , (A__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : List[Any]) ->Union[str, Any]:
'''simple docstring'''
if item in self.set:
self.set.remove(UpperCAmelCase__)
A__ = []
((A__) , (A__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((A__) , (A__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
return self.elements[0][1]
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
((A__) , (A__)) = heapq.heappop(self.elements)
self.set.remove(UpperCAmelCase__)
return (priority, item)
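# A usage sketch for the queue above (hedged: put / minkey / top_show / top are the
# de-mangled method names, matching the calls made later in this file):
#
#     pq = PriorityQueue()
#     pq.put((0, 0), 5)          # insert item (0, 0) with priority 5
#     pq.put((1, 1), 3)
#     pq.put((0, 0), 2)          # re-inserting an existing item updates its priority
#     assert pq.minkey() == 2    # smallest priority currently queued
#     assert pq.top_show() == (0, 0)
#     priority, item = pq.top()  # pops and returns (2, (0, 0))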
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = np.array(lowercase_ )
A__ = np.array(lowercase_ )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
return consistent_heuristic(lowercase_ , lowercase_ ) // t
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
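# A quick numeric check of the three heuristics above for p = (3, 4), goal = (0, 0)
# (stated inline, since the definitions share a mangled name): the Euclidean
# distance is 5.0, the scaled variant is 5.0 // t, and the Manhattan distance is 7.
assert np.linalg.norm(np.array((3, 4)) - np.array((0, 0))) == 5.0
assert abs(3 - 0) + abs(4 - 0) == 7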
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
"""simple docstring"""
A__ = g_function[start] + Wa * heuristics[i](lowercase_ , lowercase_ )
return ans
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = np.chararray((n, n) )
for i in range(lowercase_ ):
for j in range(lowercase_ ):
A__ = '''*'''
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if (j, (n - 1) - i) in blocks:
A__ = '''#'''
A__ = '''-'''
A__ = back_pointer[goal]
while x != start:
((A__) , (A__)) = x
# print(x)
A__ = '''-'''
A__ = back_pointer[x]
A__ = '''-'''
for i in range(lowercase_ ):
for j in range(lowercase_ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
A__ = back_pointer[goal]
while x != start:
print(lowercase_ , end=''' ''' )
A__ = back_pointer[x]
print(lowercase_ )
sys.exit()
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Union[str, Any]:
"""simple docstring"""
for itera in range(lowercase_ ):
open_list[itera].remove_element(lowercase_ )
# print("s", s)
# print("j", j)
((A__) , (A__)) = s
A__ = (x - 1, y)
A__ = (x + 1, y)
A__ = (x, y + 1)
A__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowercase_ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowercase_ )
A__ = -1
A__ = float('''inf''' )
if valid(lowercase_ ) and g_function[neighbours] > g_function[s] + 1:
A__ = g_function[s] + 1
A__ = s
if neighbours not in close_list_anchor:
open_list[0].put(lowercase_ , key(lowercase_ , 0 , lowercase_ , lowercase_ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowercase_ ):
if key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) <= Wa * key(
lowercase_ , 0 , lowercase_ , lowercase_ ):
open_list[j].put(
lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
_lowerCamelCase : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_lowerCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
_lowerCamelCase : Optional[int] = make_common_ground()
_lowerCamelCase : Optional[Any] = blocks_blk
# hyper parameters
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : List[Any] = 20
_lowerCamelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
_lowerCamelCase : str = (0, 0)
_lowerCamelCase : Tuple = (n - 1, n - 1)
_lowerCamelCase : int = 1
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = {start: 0, goal: float('''inf''' )}
A__ = {start: -1, goal: -1}
A__ = []
A__ = set()
for i in range(lowercase_ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowercase_ , key(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) )
A__ = []
A__ = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , lowercase_ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowercase_ , lowercase_ , lowercase_ )
else:
A__ , A__ = open_list[i].top_show()
visited.add(lowercase_ )
expand_state(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
close_list_inad.append(lowercase_ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowercase_ , lowercase_ , lowercase_ )
else:
A__ = open_list[0].top_show()
visited.add(lowercase_ )
expand_state(
lowercase_ , 0 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
close_list_anchor.append(lowercase_ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowercase_ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 87 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
__snake_case : Any =XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
__snake_case : int =torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
__snake_case : int =torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case : List[Any] =torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case : Any =model(a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a , atol=1e-3 ) )
@slow
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : Union[str, Any] =XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
__snake_case : Union[str, Any] =torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
__snake_case : Optional[int] =torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case : str =torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case : Dict =model(a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a , atol=1e-3 ) )
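# For context, the hard-coded ids in both tests encode the commented sentence; a
# hedged sketch of how they could be reproduced (kept as a comment, not executed):
#
#     from transformers import XLMRobertaTokenizer
#     tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     ids = tok("The dog is cute and lives in the garden house", return_tensors="pt").input_ids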
| 497 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowercase ( a : str ) -> None:
__snake_case , __snake_case : List[Any] =analyze_text(a )
__snake_case : Union[str, Any] =list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
__snake_case : List[Any] =sum(single_char_strings.values() )
# one length string
__snake_case : str =0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
__snake_case : Union[str, Any] =single_char_strings[ch]
__snake_case : Any =my_str / all_sum
            my_fir_sum += prob * math.loga(prob ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
__snake_case : Optional[Any] =sum(two_char_strings.values() )
__snake_case : Dict =0
    # for each two-character sequence over the alphabet, calculate its entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            __snake_case : Tuple =cha + chb
            if sequence in two_char_strings:
                __snake_case : List[Any] =two_char_strings[sequence]
                __snake_case : Tuple =int(my_str ) / all_sum
                my_sec_sum += prob * math.loga(prob )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __lowercase ( a : str ) -> tuple[dict, dict]:
__snake_case : List[str] =Counter() # type: ignore
__snake_case : Union[str, Any] =Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(a ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
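# A self-contained check of the first-order entropy formula used above,
# H = -sum(p * log2(p)): two equiprobable symbols carry exactly one bit each.
_toy_counts = Counter("abab")
_toy_total = sum(_toy_counts.values())
assert round(-sum((c / _toy_total) * math.log2(c / _toy_total) for c in _toy_counts.values()), 1) == 1.0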
def __lowercase ( ) -> int:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 497 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_lowercase : Any ="src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Any =direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_lowercase : str =re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_lowercase : Union[str, Any] =re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowercase : Optional[int] =re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_lowercase : List[Any] =[
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase_ ( _lowercase : Any) -> Optional[int]:
"""simple docstring"""
a__ : List[str] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , _lowercase)
return [m.group(0) for m in matches]
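# A quick check of the splitter above: the regex breaks a camel-cased name on
# lower->upper boundaries and before an upper+lower pair, e.g.
assert [
    m.group(0)
    for m in re.finditer(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", "GPTNeoForCausalLM")
] == ["GPT", "Neo", "For", "Causal", "LM"]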
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
a__ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
a__ : Optional[Any] = {
config.replace("""Config""" , """"""): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
a__ : Any = collections.defaultdict(_lowercase)
a__ : Optional[Any] = collections.defaultdict(_lowercase)
a__ : List[str] = collections.defaultdict(_lowercase)
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_lowercase):
a__ : Dict = None
if _re_tf_models.match(_lowercase) is not None:
a__ : int = tf_models
a__ : int = _re_tf_models.match(_lowercase).groups()[0]
elif _re_flax_models.match(_lowercase) is not None:
a__ : str = flax_models
a__ : List[str] = _re_flax_models.match(_lowercase).groups()[0]
elif _re_pt_models.match(_lowercase) is not None:
a__ : int = pt_models
a__ : int = _re_pt_models.match(_lowercase).groups()[0]
if lookup_dict is not None:
while len(_lowercase) > 0:
if attr_name in model_prefix_to_model_type:
a__ : int = True
break
# Try again after removing the last word in the name
a__ : Union[str, Any] = """""".join(camel_case_split(_lowercase)[:-1])
a__ : Any = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
a__ : Tuple = list(_lowercase)
all_models.sort()
a__ : Dict = {"""model_type""": all_models}
a__ : Tuple = [pt_models[t] for t in all_models]
a__ : Tuple = [tf_models[t] for t in all_models]
a__ : Optional[Any] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
a__ : str = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
a__ : List[str] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
a__ : str = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
a__ : Union[str, Any] = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
a__ : Union[str, Any] = """AutoTokenizer"""
a__ : str = [processors[t] for t in all_models]
return pd.DataFrame(_lowercase)
def lowerCAmelCase_ ( _lowercase : Dict) -> Optional[Any]:
"""simple docstring"""
a__ : List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
a__ : Tuple = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
a__ : Dict = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(_lowercase , _lowercase , _lowercase):
# The type of pipeline may not exist in this framework
if not hasattr(_lowercase , _lowercase):
continue
# First extract all model_names
a__ : Optional[int] = []
for name in getattr(_lowercase , _lowercase).values():
if isinstance(_lowercase , _lowercase):
model_names.append(_lowercase)
else:
model_names.extend(list(_lowercase))
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
return table
def lowerCAmelCase_ ( _lowercase : Optional[int] , _lowercase : Optional[Any]) -> Dict:
"""simple docstring"""
a__ : Any = get_frameworks_table()
a__ : Dict = Dataset.from_pandas(_lowercase)
a__ : Dict = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=_lowercase)
a__ : int = Dataset.from_json(_lowercase)
a__ : Optional[int] = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(_lowercase))
}
a__ : Optional[int] = update_pipeline_and_auto_class_table(_lowercase)
    # Sort the model classes to avoid nondeterministic ordering that would create spurious update commits.
a__ : Union[str, Any] = sorted(table.keys())
a__ : Optional[Any] = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
})
a__ : Any = Dataset.from_pandas(_lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowercase , """frameworks.json"""))
tags_dataset.to_json(os.path.join(_lowercase , """pipeline_tags.json"""))
if commit_sha is not None:
a__ : Tuple = (
F'''Update with commit {commit_sha}\n\nSee: '''
F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
a__ : Tuple = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=_lowercase , repo_type="""dataset""" , token=_lowercase , commit_message=_lowercase , )
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
a__ : Dict = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
a__ : Optional[int] = transformers_module.pipelines.SUPPORTED_TASKS
a__ : Tuple = []
for key in pipeline_tasks:
if key not in in_table:
a__ : str = pipeline_tasks[key]["""pt"""]
if isinstance(_lowercase , (list, tuple)):
a__ : List[str] = model[0]
a__ : List[Any] = model.__name__
if model not in in_table.values():
missing.append(_lowercase)
if len(_lowercase) > 0:
a__ : Union[str, Any] = """, """.join(_lowercase)
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
F'''`utils/update_metadata.py`: {msg}. Please add them!''')
if __name__ == "__main__":
_lowercase : str =argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
_lowercase : int =parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 136 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( _lowercase : Dict , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : int) -> Any:
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowerCAmelCase_ ( _lowercase : Any , _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Optional[int]=True) -> str:
"""simple docstring"""
model.train()
a__ : Optional[Any] = model(_lowercase)
a__ : Optional[int] = F.mse_loss(_lowercase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowercase)
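# For reference, the canonical Accelerate gradient-accumulation loop these tests
# exercise (a minimal sketch; assumes model / optimizer / dataloader were already
# passed through `accelerator.prepare`):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     for input, target in dataloader:
#         with accelerator.accumulate(model):
#             loss = F.mse_loss(model(input), target)
#             accelerator.backward(loss)  # grads sync only on accumulation boundaries
#             optimizer.step()
#             optimizer.zero_grad()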
def lowerCAmelCase_ ( _lowercase : Any , _lowercase : Any=False) -> Optional[int]:
"""simple docstring"""
set_seed(42)
a__ : Any = RegressionModel()
a__ : int = deepcopy(_lowercase)
a__ : Dict = RegressionDataset(length=80)
a__ : Union[str, Any] = DataLoader(_lowercase , batch_size=16)
model.to(accelerator.device)
if sched:
a__ : Optional[int] = AdamW(params=model.parameters() , lr=1e-3)
a__ : Optional[int] = AdamW(params=ddp_model.parameters() , lr=1e-3)
a__ : Tuple = LambdaLR(_lowercase , lr_lambda=lambda _lowercase: epoch**0.65)
a__ : Tuple = LambdaLR(_lowercase , lr_lambda=lambda _lowercase: epoch**0.65)
# Make a copy of `model`
if sched:
a__ , a__ , a__ , a__ : Optional[Any] = accelerator.prepare(_lowercase , _lowercase , _lowercase , _lowercase)
else:
a__ , a__ : Optional[Any] = accelerator.prepare(_lowercase , _lowercase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase_ ( _lowercase : List[str]) -> int:
"""simple docstring"""
# Test when on a single CPU or GPU that the context manager does nothing
a__ , a__ , a__ : int = get_training_setup(_lowercase)
# Use a single batch
a__ , a__ : Union[str, Any] = next(iter(_lowercase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
a__ , a__ : List[Any] = accelerator.gather((ddp_input, ddp_target))
a__ , a__ : Optional[Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowercase):
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
else:
# Sync grads
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowercase , _lowercase , _lowercase , _lowercase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
a__ : Union[str, Any] = ddp_input[torch.randperm(len(_lowercase))]
def lowerCAmelCase_ ( _lowercase : List[Any]) -> List[str]:
"""simple docstring"""
# Test on distributed setup that context manager behaves properly
a__ , a__ , a__ : Optional[Any] = get_training_setup(_lowercase)
# Use a single batch
a__ , a__ : Optional[Any] = next(iter(_lowercase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
a__ , a__ : Union[str, Any] = accelerator.gather((ddp_input, ddp_target))
a__ , a__ : str = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowercase):
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
else:
# Sync grads
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
a__ : List[Any] = ddp_input[torch.randperm(len(_lowercase))]
def lowerCAmelCase_ ( _lowercase : int=False , _lowercase : Any=False) -> List[str]:
"""simple docstring"""
a__ : List[Any] = Accelerator(
split_batches=_lowercase , dispatch_batches=_lowercase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
a__ , a__ , a__ : Dict = get_training_setup(_lowercase)
for iteration, batch in enumerate(_lowercase):
a__ , a__ : int = batch.values()
# Gather the distributed inputs and targs for the base model
a__ , a__ : Tuple = accelerator.gather((ddp_input, ddp_target))
a__ , a__ : Optional[Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowercase):
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowercase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
a__ : str = ddp_input[torch.randperm(len(_lowercase))]
GradientState._reset_state()
def lowerCAmelCase_ ( _lowercase : Dict=False , _lowercase : Union[str, Any]=False) -> Optional[Any]:
"""simple docstring"""
a__ : int = Accelerator(
split_batches=_lowercase , dispatch_batches=_lowercase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
a__ , a__ , a__ , a__ , a__ , a__ , a__ : Optional[int] = get_training_setup(_lowercase , _lowercase)
for iteration, batch in enumerate(_lowercase):
a__ , a__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
a__ , a__ : List[str] = accelerator.gather((ddp_input, ddp_target))
a__ , a__ : Optional[Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowercase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowercase):
step_model(_lowercase , _lowercase , _lowercase , _lowercase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
a__ : Optional[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowercase))
if accelerator.num_processes > 1:
check_model_parameters(_lowercase , _lowercase , _lowercase , _lowercase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowerCAmelCase_ ( ) -> Any:
"""simple docstring"""
a__ : int = Accelerator()
a__ : List[Any] = RegressionDataset(length=80)
a__ : Dict = DataLoader(_lowercase , batch_size=16)
a__ : Dict = RegressionDataset(length=96)
a__ : Optional[int] = DataLoader(_lowercase , batch_size=16)
a__ , a__ : Optional[int] = accelerator.prepare(_lowercase , _lowercase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowercase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowercase)
if iteration < len(_lowercase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowercase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowercase)
if batch_num < len(_lowercase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) -> Any:
"""simple docstring"""
a__ : Optional[Any] = Accelerator()
a__ : Optional[Any] = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""")
test_noop_sync(_lowercase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""")
test_distributed_sync(_lowercase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowercase , _lowercase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowercase , _lowercase)
def lowerCAmelCase_ ( _lowercase : Dict) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 136 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : Union[str, Any] , __A : Dict=1_2 , __A : Optional[int]=7 , __A : List[Any]=True , __A : int=True , __A : str=True , __A : Optional[int]=9_9 , __A : str=3_2 , __A : Tuple=3_2 , __A : List[str]=2 , __A : int=4 , __A : List[Any]=3_7 , __A : List[Any]=0.1 , __A : Optional[Any]=0.1 , __A : Union[str, Any]=5_1_2 , __A : Optional[Any]=0.0_2 , __A : Any=0 , __A : Dict=None , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = projection_dim
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = max_position_embeddings
_lowercase = initializer_range
_lowercase = scope
_lowercase = bos_token_id
def snake_case ( self : Tuple ):
"""simple docstring"""
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase = None
if self.use_input_mask:
_lowercase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_lowercase = input_mask.numpy()
_lowercase , _lowercase = input_mask.shape
_lowercase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_lowercase = 1
_lowercase = 0
_lowercase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def snake_case ( self : int ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def snake_case ( self : List[str] , __A : Optional[int] , __A : str , __A : List[str] ):
"""simple docstring"""
_lowercase = TFBlipTextModel(config=_A )
_lowercase = model(_A , attention_mask=_A , training=_A )
_lowercase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase = config_and_inputs
_lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = BlipTextModelTester(self )
_lowercase = ConfigTester(self , config_class=_A , hidden_size=3_7 )
def snake_case ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def snake_case ( self : int ):
"""simple docstring"""
pass
def snake_case ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def snake_case ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def snake_case ( self : Any ):
"""simple docstring"""
pass
@slow
def snake_case ( self : Tuple ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def snake_case ( self : Dict , __A : Dict=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 705 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
__magic_name__ : List[str] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class UpperCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = 'swin2sr'
UpperCAmelCase__ = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Tuple , __A : Union[str, Any]=6_4 , __A : Union[str, Any]=1 , __A : int=3 , __A : Any=1_8_0 , __A : int=[6, 6, 6, 6, 6, 6] , __A : Tuple=[6, 6, 6, 6, 6, 6] , __A : List[Any]=8 , __A : Tuple=2.0 , __A : Dict=True , __A : Dict=0.0 , __A : Dict=0.0 , __A : Any=0.1 , __A : str="gelu" , __A : Optional[Any]=False , __A : Optional[Any]=0.0_2 , __A : List[str]=1e-5 , __A : Optional[int]=2 , __A : List[str]=1.0 , __A : Union[str, Any]="1conv" , __A : Dict="pixelshuffle" , **__A : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**__A )
_lowercase = image_size
_lowercase = patch_size
_lowercase = num_channels
_lowercase = embed_dim
_lowercase = depths
_lowercase = len(__A )
_lowercase = num_heads
_lowercase = window_size
_lowercase = mlp_ratio
_lowercase = qkv_bias
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = drop_path_rate
_lowercase = hidden_act
_lowercase = use_absolute_embeddings
_lowercase = layer_norm_eps
_lowercase = initializer_range
_lowercase = upscale
_lowercase = img_range
_lowercase = resi_connection
_lowercase = upsampler
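# A minimal instantiation sketch (hedged: assumes this class is transformers'
# Swin2SRConfig, as the checkpoint map and `model_type` above indicate):
#
#     from transformers import Swin2SRConfig
#     config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
#     assert config.model_type == "swin2sr"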
| 602 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
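# For context, `_LazyModule` defers the heavy framework imports above until an
# attribute is first accessed. A minimal sketch of the same idea using the PEP 562
# module-level __getattr__ hook (an illustration, not the actual implementation):
#
#     import importlib
#
#     _SUBMODULES = {"VisionEncoderDecoderModel": "modeling_vision_encoder_decoder"}
#
#     def __getattr__(name):
#         if name in _SUBMODULES:
#             module = importlib.import_module("." + _SUBMODULES[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")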
| 181 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Tuple ='''layoutlmv3'''
def __init__( self :Optional[int], snake_case :int=5_0265, snake_case :int=768, snake_case :Any=12, snake_case :Union[str, Any]=12, snake_case :List[str]=3072, snake_case :List[str]="gelu", snake_case :List[str]=0.1, snake_case :Optional[int]=0.1, snake_case :Optional[int]=512, snake_case :Tuple=2, snake_case :Optional[Any]=0.0_2, snake_case :Optional[int]=1e-5, snake_case :Union[str, Any]=1, snake_case :Dict=0, snake_case :Tuple=2, snake_case :Tuple=1024, snake_case :Optional[Any]=128, snake_case :Optional[Any]=128, snake_case :List[str]=True, snake_case :str=32, snake_case :Optional[int]=128, snake_case :Dict=64, snake_case :List[str]=256, snake_case :Optional[Any]=True, snake_case :Optional[Any]=True, snake_case :Dict=True, snake_case :Optional[Any]=224, snake_case :int=3, snake_case :int=16, snake_case :str=None, **snake_case :Dict, ):
"""simple docstring"""
super().__init__(
vocab_size=snake_case, hidden_size=snake_case, num_hidden_layers=snake_case, num_attention_heads=snake_case, intermediate_size=snake_case, hidden_act=snake_case, hidden_dropout_prob=snake_case, attention_probs_dropout_prob=snake_case, max_position_embeddings=snake_case, type_vocab_size=snake_case, initializer_range=snake_case, layer_norm_eps=snake_case, pad_token_id=snake_case, bos_token_id=snake_case, eos_token_id=snake_case, **snake_case, )
_lowercase =max_ad_position_embeddings
_lowercase =coordinate_size
_lowercase =shape_size
_lowercase =has_relative_attention_bias
_lowercase =rel_pos_bins
_lowercase =max_rel_pos
_lowercase =has_spatial_attention_bias
_lowercase =rel_ad_pos_bins
_lowercase =max_rel_ad_pos
_lowercase =text_embed
_lowercase =visual_embed
_lowercase =input_size
_lowercase =num_channels
_lowercase =patch_size
_lowercase =classifier_dropout
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Optional[Any] =version.parse('''1.12''' )
@property
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
])
@property
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
return 1e-5
@property
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
return 12
def UpperCamelCase__ ( self :List[Any], snake_case :"ProcessorMixin", snake_case :int = -1, snake_case :int = -1, snake_case :bool = False, snake_case :Optional["TensorType"] = None, snake_case :int = 3, snake_case :int = 40, snake_case :int = 40, ):
"""simple docstring"""
setattr(processor.image_processor, 'apply_ocr', snake_case)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase =compute_effective_axis_dimension(
snake_case, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase =processor.tokenizer.num_special_tokens_to_add(snake_case)
_lowercase =compute_effective_axis_dimension(
snake_case, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=snake_case)
# Generate dummy inputs according to compute batch and sequence
_lowercase =[[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
_lowercase =[[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
_lowercase =self._generate_dummy_images(snake_case, snake_case, snake_case, snake_case)
_lowercase =dict(
processor(
snake_case, text=snake_case, boxes=snake_case, return_tensors=snake_case, ))
return inputs
| 181 | 1 |
from itertools import product
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> list[int]:
lowerCAmelCase_ : int = sides_number
lowerCAmelCase_ : List[str] = max_face_number * dice_number
lowerCAmelCase_ : Any = [0] * (max_total + 1)
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : str = range(lowerCAmelCase_ , max_face_number + 1 )
for dice_numbers in product(lowerCAmelCase_ , repeat=lowerCAmelCase_ ):
lowerCAmelCase_ : Optional[Any] = sum(lowerCAmelCase_ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCAmelCase ( )-> float:
lowerCAmelCase_ : List[Any] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCAmelCase_ : Optional[int] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : List[str] = 9
lowerCAmelCase_ : Union[str, Any] = 4 * 9
lowerCAmelCase_ : Tuple = 6
for peter_total in range(lowerCAmelCase_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCAmelCase_ : Union[str, Any] = (4**9) * (6**6)
lowerCAmelCase_ : Union[str, Any] = peter_wins_count / total_games_number
lowerCAmelCase_ : Optional[Any] = round(lowerCAmelCase_ , ndigits=7 )
return rounded_peter_win_probability
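# Sanity check on the normalisation above: Peter rolls nine 4-sided dice and Colin
# six 6-sided dice, so the joint sample space holds
# 4**9 * 6**6 = 262_144 * 46_656 = 12_230_590_464 equally likely outcomes.
assert 4**9 * 6**6 == 12_230_590_464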
if __name__ == "__main__":
print(f"""{solution() = }""") | 619 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase : Tuple =10
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
if array[i] == target:
return i
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : int = len(lowerCAmelCase_ )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = (left + right) // 3 + 1
lowerCAmelCase_ : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase_ : Dict = one_third - 1
elif array[two_third] < target:
lowerCAmelCase_ : List[Any] = two_third + 1
else:
lowerCAmelCase_ : Union[str, Any] = one_third + 1
lowerCAmelCase_ : Tuple = two_third - 1
else:
return -1
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> int:
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = (left + right) // 3 + 1
lowerCAmelCase_ : Optional[int] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
_UpperCAmelCase : Union[str, Any] =[int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_UpperCAmelCase : int =int(input("""Enter the number to be found in the list:\n""").strip())
_UpperCAmelCase : Optional[Any] =ite_ternary_search(collection, target)
_UpperCAmelCase : List[str] =rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("""Not found""") | 619 | 1 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Take items in decreasing order of key_func for as long as they fit in max_cost.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 57 |
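A worked call of the greedy selector (a minimal sketch; the menu data is illustrative):

food = ["Burger", "Pizza", "Coca Cola", "Rice", "Sambhar", "Chicken", "Fries", "Milk"]
value = [80, 100, 60, 70, 50, 110, 90, 60]
weight = [40, 60, 40, 70, 100, 85, 55, 70]
foods = build_menu(food, value, weight)
taken, total_value = greedy(foods, 500, Things.get_value)
assert total_value == 570.0  # everything but Sambhar fits under the 500 budget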
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging

logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        # Expand e.g. [[["global", "local"], 12]] into a flat per-layer list.
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-size computation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 157 | 0 |
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the integer right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    """Return the perimeter <= n that admits the largest number of triples."""
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
| 551 |
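A small worked check of the counting function (a sketch; the asserted values follow from (3, 4, 5) being the only triple with perimeter 12, and 120 being the smallest perimeter admitting three triples):

assert pythagorean_triple(12)[12] == 1  # only (3, 4, 5)
assert solution(120) == 120             # (30, 40, 50), (20, 48, 52), (24, 45, 51)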
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via the logistic identity: tanh(x) = 2 * sigmoid(2x) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 551 | 1 |
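The identity can be verified directly against NumPy's built-in (a minimal sketch):

import numpy as np

v = np.array([-1.0, 0.0, 1.0])
assert np.allclose(tangent_hyperbolic(v), np.tanh(v))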
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | None = None
        self.N: int = len(arr)
        # Iterative segment-tree layout: leaves live at st[N:], internal nodes below.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1}

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 269 |
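Point queries after point updates, in miniature (a sketch with illustrative data):

st = SegmentTree([5, 1, 9, 3], min)
assert st.query(0, 3) == 1  # inclusive range minimum
st.update(1, 7)
assert st.query(0, 3) == 3  # the minimum moves after the update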
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: Optional[Union[str, bool]] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", d_model: int = 64, dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 495 | 0 |
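Instantiating the config shows the context-length fallback encoded above (a sketch; in user code the class is imported from the top-level transformers package rather than via the relative paths used inside the library):

from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24)
assert config.context_length == 24  # falls back to prediction_length when unset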
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : int = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 93 | """simple docstring"""
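The pattern above defers heavy submodule imports until first use. A minimal standalone sketch of the idea (`LazyModule` here is a simplified stand-in for transformers' internal `_LazyModule`, not its actual implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is first touched.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)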
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 93 | 1 |
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main() | 44 |
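The staleness gates are plain date arithmetic; for instance (a sketch with a synthetic timestamp, independent of the GitHub API):

from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=10)
assert (datetime.utcnow() - updated_at).days > 7  # old enough to be closed after a bot nudge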
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """A tour is complete once no square is still marked 0."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 430 | 0 |
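A 5x5 board admits an open tour (smaller squares beyond the trivial 1x1 do not), so it makes a quick check (a sketch; the backtracking search may take a few seconds):

board = open_knight_tour(5)
assert sorted(x for row in board for x in row) == list(range(1, 26))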
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 390 |
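Pairing two stock configs through the classmethod (a sketch; BertConfig/GPT2Config are stand-ins for any encoder/decoder pair):

from transformers import BertConfig, EncoderDecoderConfig, GPT2Config

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
assert config.decoder.is_decoder and config.decoder.add_cross_attention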
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    # Normalize the accepted video formats to a list of videos, each a list of frames.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST):
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 390 | 1 |
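End-to-end use on synthetic frames (a sketch; the class name, frame count, and resulting shape are assumptions for illustration):

import numpy as np

frames = [np.random.randint(0, 256, size=(3, 240, 320), dtype=np.uint8) for _ in range(8)]
processor = VideoMAEImageProcessor()
batch = processor.preprocess(frames, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: one video of eight 3x224x224 frames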
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase_ : Optional[Any] = dict(zip(a_ , range(len(a_ ) ) ) )
lowerCAmelCase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase_ : List[str] = {'unk_token': '<unk>'}
lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
lowerCAmelCase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
lowerCAmelCase_ : int = os.path.join(self.tmpdirname , a_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(a_ , a_ )
def lowerCamelCase ( self : Optional[int] , **a_ : Tuple ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a_ )
def lowerCamelCase ( self : List[str] , **a_ : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def lowerCamelCase ( self : Optional[Any] , **a_ : Dict ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **a_ )
def lowerCamelCase ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = self.get_tokenizer()
lowerCAmelCase_ : str = self.get_rust_tokenizer()
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : Any = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=a_ )
lowerCAmelCase_ : Any = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a_ )
self.assertIsInstance(processor_fast.tokenizer , a_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a_ )
self.assertIsInstance(processor_fast.image_processor , a_ )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase_ : Optional[Any] = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
lowerCAmelCase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : List[Any] = self.get_image_processor()
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Dict = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
lowerCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Union[str, Any] = image_processor(a_ , return_tensors="np" )
lowerCAmelCase_ : Union[str, Any] = processor(images=a_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : int = self.get_image_processor()
lowerCAmelCase_ : Any = self.get_tokenizer()
lowerCAmelCase_ : int = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
lowerCAmelCase_ : Dict = 'lower newer'
lowerCAmelCase_ : Optional[Any] = processor(text=a_ )
lowerCAmelCase_ : Tuple = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = self.get_image_processor()
lowerCAmelCase_ : Any = self.get_tokenizer()
lowerCAmelCase_ : Any = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
lowerCAmelCase_ : Optional[Any] = 'lower newer'
lowerCAmelCase_ : Any = self.prepare_image_inputs()
lowerCAmelCase_ : str = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : int = self.get_image_processor()
lowerCAmelCase_ : List[str] = self.get_tokenizer()
lowerCAmelCase_ : Any = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
lowerCAmelCase_ : List[str] = self.prepare_image_inputs()
lowerCAmelCase_ : str = self.prepare_image_inputs()
lowerCAmelCase_ : List[str] = processor(images=a_ , visual_prompt=a_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Any = self.get_image_processor()
lowerCAmelCase_ : str = self.get_tokenizer()
lowerCAmelCase_ : str = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
lowerCAmelCase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ : str = processor.batch_decode(a_ )
lowerCAmelCase_ : Dict = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ , a_ )
| 610 |
import os


def solution(filename: str = "matrix.txt") -> int:
    """Minimal right/down path sum through the comma-separated grid in `filename`."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 302 | 0 |
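The same recurrence worked by hand on a 2x2 grid (a sketch; the dp table fills to [[1, 4], [5, 9]], so the cheapest right/down path through [[1, 3], [4, 5]] costs 9):

grid = [[1, 3], [4, 5]]
dp = [[0, 0], [0, 0]]
dp[0][0] = grid[0][0]
dp[0][1] = grid[0][1] + dp[0][0]
dp[1][0] = grid[1][0] + dp[0][0]
dp[1][1] = grid[1][1] + min(dp[0][1], dp[1][0])
assert dp[1][1] == 9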
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser

PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Hash the whitespace-normalized content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Compute mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes, and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is auto-generated by scanning its first lines for keywords."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a config file or a test, first by keywords, then by symbol counts."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file contains none of the keywords that indicate real code (functions, classes, loops)."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the "=" symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to avoid filling the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-poor files are dropped with a probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 720 |
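Two of the heuristics run on a toy example (a sketch; `line_stats` and `is_autogenerated` are defined above and need no tokenizer):

example = {"content": "x = 1\ny = 2\n# automatically generated file\n"}
assert is_autogenerated(example)["autogenerated"] is True
assert line_stats(example)["line_max"] == len("# automatically generated file")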
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 139 | 0 |
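A solvable 4x4 instance (a sketch; 0 marks open cells, 1 marks walls, and the path runs from the top-left to the bottom-right corner):

maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [0, 0, 0, 0],
]
assert solve_maze(maze)  # prints the 0/1 path matrix and returns True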
g = 9.80665  # standard gravity in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force = fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 187 |
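A worked number (a sketch): fresh water at 1000 kg/m^3 displaced by 0.002 m^3 exerts about 19.6 N of lift.

force = archimedes_principle(fluid_density=1000, volume=0.002)
assert abs(force - 19.61) < 0.01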
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 151 | 0 |
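To exercise the accumulation path, the script can be launched through the accelerate CLI (a sketch; the filename is hypothetical, and the flags are the script's own argparse options defined above):

accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16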
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A__ : List[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ):
"""simple docstring"""
if attention_mask is None:
_lowercase: Optional[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase: Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase: Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase: str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase: List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __magic_name__ :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=16 , A_=2 , A_=4 , A_=4 , A_="gelu" , A_=0.1 , A_=0.1 , A_=32 , A_=2 , A_=1 , A_=0 , A_=0.02 , ) -> Any:
"""simple docstring"""
_lowercase: List[str] = parent
_lowercase: int = batch_size
_lowercase: Tuple = seq_length
_lowercase: Tuple = is_training
_lowercase: Dict = use_labels
_lowercase: Dict = vocab_size
_lowercase: Any = hidden_size
_lowercase: Tuple = num_hidden_layers
_lowercase: Tuple = num_attention_heads
_lowercase: Optional[Any] = intermediate_size
_lowercase: str = hidden_act
_lowercase: Any = hidden_dropout_prob
_lowercase: Dict = attention_probs_dropout_prob
_lowercase: Optional[int] = max_position_embeddings
_lowercase: int = eos_token_id
_lowercase: Tuple = pad_token_id
_lowercase: Union[str, Any] = bos_token_id
_lowercase: List[str] = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
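# An illustration (a sketch, not part of the test suite) of the position-id
# layout the cache checks above rely on: one row of positions 0..n-2 per batch
# element, matching the decoder_input_ids[:, :-1] slice fed alongside the cache.
_demo_positions = np.broadcast_to(np.arange(3)[None, :], (2, 3))
assert _demo_positions.shape == (2, 3) and _demo_positions[1, 2] == 2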
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
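# A numpy-only sketch of the shift_tokens_right semantics exercised above:
# prepend decoder_start_token_id (2) and drop the last position. The real Flax
# helper also rewrites pad tokens, so this is only an approximation.
_demo_tgt = np.array([[5, 6, 7]])
_demo_shifted = np.concatenate([np.full((1, 1), 2), _demo_tgt[:, :-1]], axis=1)
assert _demo_shifted.tolist() == [[2, 5, 6]]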
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 705 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply X (NOT) gates to two qubits, measure them, and return the shot counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"""Total count for various states are: {counts}""")
| 272 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
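# Hedged usage note: each line of --correct_filename is expected to follow the
# format "file;class_name;test_name;correct_line", matching the line.split(";")
# unpacking in main(). A hypothetical example line:
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([0.1])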
| 128 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 128 | 1 |
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
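# A small usage sketch: build the automaton over two keywords and locate every
# occurrence (keyword -> list of start indices) in a haystack.
_demo_automaton = Automaton(["what", "hat"])
assert _demo_automaton.search_in("whatever") == {"what": [0], "hat": [1]}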
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 |
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 154 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yields the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
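# Quick sanity check: the primes below 10 are 2, 3, 5 and 7, which sum to 17.
assert solution(10) == 17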
if __name__ == "__main__":
print(F"{solution() = }") | 86 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
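# gather_for_metrics (used above) both all-gathers tensors across processes and
# trims the duplicate samples that padded the final uneven batch, so the
# concatenated predictions line up 1:1 with the evaluation dataset.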
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
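# Hedged usage note: this script is meant to be run through the Accelerate
# launcher so the distributed state is initialised, e.g.
#   accelerate launch --num_processes 2 test_metrics.py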
if __name__ == "__main__":
main() | 86 | 1 |
import pprint
import requests
__snake_case = """https://zenquotes.io/api"""
def A_ ( ) ->list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def A_ ( ) ->list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 708 | '''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 603 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''ESMFold only has one output format.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 24 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) with `accuracy` extra terms of its Taylor (Maclaurin) series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
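# Worked check: 18 Taylor terms are far more than enough at 90 degrees, so the
# result rounded to 10 places is exactly 1.0.
assert sin(90.0) == 1.0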
if __name__ == "__main__":
__import__("doctest").testmod()
| 468 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]


def next_number(number: int) -> int:
    """Returns the next number of the chain."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    """Generates the chain of numbers until the next number is 1 or 89, memoising results in CHAINS."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Returns how many starting numbers below `number` produce chains that arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
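# Sanity check: among the numbers 1..10, every one except 1, 7 and 10 arrives
# at 89, so seven chains end in 89.
assert solution(10) == 7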
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 708 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 106 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False, False, False
@dataclass
class Audio:
    """Audio `Feature` to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__(self ) -> Optional[Any]:
'''simple docstring'''
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode example into a format for Arrow storage."""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        """Decode an encoded example into an audio dict with "path", "array" and "sampling_rate"."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)

        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _UpperCAmelCase (self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def _UpperCAmelCase (self ,_lowerCamelCase ) -> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
__lowercase = pa.array([None] * len(_lowerCamelCase ) ,type=pa.binary() )
__lowercase = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowercase = pa.array([None] * len(_lowerCamelCase ) ,type=pa.string() )
__lowercase = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
__lowercase = pa.array([Audio().encode_example(_lowerCamelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
__lowercase = storage.field('''bytes''' )
else:
__lowercase = pa.array([None] * len(_lowerCamelCase ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
__lowercase = storage.field('''path''' )
else:
__lowercase = pa.array([None] * len(_lowerCamelCase ) ,type=pa.string() )
__lowercase = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
return array_cast(_lowerCamelCase ,self.pa_type )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(_lowerCamelCase ):
with xopen(_lowerCamelCase ,'''rb''' ) as f:
__lowercase = f.read()
return bytes_
__lowercase = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
__lowercase = pa.array(
[os.path.basename(_lowerCamelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
__lowercase = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_lowerCamelCase ,self.pa_type )
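# --- Added usage sketch (not in the original file; the dataset repo id below is
# hypothetical). Shows how the Audio feature above is typically driven end to end:
# cast a column, then access a row, which triggers decode_example().
#
#   from datasets import Audio, load_dataset
#   ds = load_dataset("some_org/some_audio_dataset", split="train")  # hypothetical repo id
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))  # decode + resample to 16 kHz
#   sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}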
| 502 |
"""Convert a fairseq XGLM checkpoint into the Hugging Face Transformers format."""
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
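# --- Added invocation sketch (script name and paths are placeholders, not from the
# original file):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted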
| 502 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls) -> str:
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
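# --- Added usage sketch (illustrative; assumes a Trainer built with `model_init`,
# and that `training_args`, `train_ds`, `eval_ds` exist in the caller's code).
# The backend string is resolved through ALL_HYPERPARAMETER_SEARCH_BACKENDS above:
#   trainer = Trainer(model_init=model_init, args=training_args,
#                     train_dataset=train_ds, eval_dataset=eval_ds)
#   best_run = trainer.hyperparameter_search(n_trials=20, direction="minimize", backend="optuna")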
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
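# --- Added note (illustrative): with _LazyModule, importing the package is cheap and
# heavy torch/TF submodules are only imported on first attribute access, e.g.:
#   from transformers import LayoutLMv3Config   # loads configuration_layoutlmv3 only
#   from transformers import TFLayoutLMv3Model  # loads the TF submodule on demand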
| 265 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # Order the inputs the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
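# --- Added usage sketch (illustrative; assumes the classes above are importable):
# build a tiny config and inspect the dynamic-axis spec the ONNX exporter will use.
#   cfg = BloomConfig(hidden_size=64, n_layer=2, n_head=8)
#   onnx_cfg = BloomOnnxConfig(cfg, use_past=True)
#   print(onnx_cfg.inputs)  # input_ids, past_key_values.*, attention_mask axes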
| 211 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # copy weights over positionally, timm key -> HF key
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
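# --- Added invocation sketch (placeholder paths; converts a single LeViT variant):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path ./levit-dump-folder/ --no-push_to_hub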
| 719 |
def binomial_coefficient(n, r):
    """Compute C(n, r) with the Pascal's-triangle dynamic programming trick, in O(n * r) time and O(r) space."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row, update right-to-left
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
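# Added sanity check against the standard library (math.comb requires Python 3.8+):
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252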
| 294 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """\
@inproceedings{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    authors = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year = {2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score

Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """Return 1 if the two strings match after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Exact-match rate (percent): a prediction scores if it matches any of its references."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
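# --- Added self-checks for the pure-Python helpers above (no model downloads needed):
assert compute_exact("The Cat!", "cat") == 1  # articles, case and punctuation are normalized away
assert compute_em(predictions=["a b"], references=[["a b", "x"]]) == 100.0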
| 63 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 216 | 0 |
from __future__ import annotations
class Matrix:
    """Matrix built from a 2D list where each inner list is one row of int/float values."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertible(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertible():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertible matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
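# --- Added usage example for the Matrix class above (note: scalar multiplication
# truncates entries to int in this implementation, which affects inverse()):
example = Matrix([[1, 2], [3, 4]])
print(example.determinant())  # -2
print((example**2).rows)  # [[7, 10], [15, 22]]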
| 208 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only "leaf" modules that actually hold computation
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing one forward pass with `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowerCamelCase__ : str = parser.parse_args()
lowerCamelCase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
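
# A minimal sketch (not part of the original script) of the check the conversion relies on:
# run one random input through both networks and compare the outputs within a tolerance.
# The helper name, input shape and tolerance are illustrative assumptions.
def logits_match(src_model, dest_model, input_shape=(1, 3, 224, 224), atol=1e-5) -> bool:
    x = torch.randn(input_shape)
    with torch.no_grad():
        return bool(torch.allclose(src_model(x), dest_model(x).logits, atol=atol))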
| 208 | 1 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping an embedding to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
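
# A short usage sketch (not in the original file); the sizes are illustrative assumptions.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    print(head(torch.randn(2, 768)).shape)  # -> torch.Size([2, 5])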
| 554 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
            shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Stop once the two frontiers have met: no shorter path can remain.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
graph_bwd = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
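    # A small usage sketch (not in the original file): with the graphs above, the
    # shortest E -> F distance should be 3 (E -> G -> F).
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))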
| 527 | 0 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
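
# A quick sanity check (not in the original script): `from_config` builds a randomly
# initialized model, unlike `from_pretrained`, which also loads weights, so inspecting
# the parameter count is a cheap way to confirm the architecture before pushing.
print(f"Initialized model with {sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")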
| 717 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = '''Hello world! cécé herlolip'''
lowerCAmelCase__ = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture."""
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device("cpu"), config)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = mask_tgt = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, clss, mask_cls
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between the models' outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between the generators' outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
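
# A brief usage note (not in the original script): the saved state_dict can later be
# restored into a freshly constructed model, e.g.
#
#   new_model = BertAbsSummarizer(config, torch.device("cpu"))
#   new_model.load_state_dict(torch.load(
#       "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"))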
| 624 | 0 |
# Imports
import numpy as np
class IndexCalculation:
    """Compute vegetation indices from the red, green, blue, red-edge and NIR bands."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
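
# A short usage sketch (not in the original file); the 2x2 band values are illustrative.
if __name__ == "__main__":
    example_red = np.array([[0.2, 0.3], [0.25, 0.4]])
    example_nir = np.array([[0.6, 0.7], [0.65, 0.8]])
    indices = IndexCalculation(red=example_red, nir=example_nir)
    print(indices.calculation("NDVI"))  # element-wise (nir - red) / (nir + red)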
| 33 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
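
# A compact sketch (not part of the test file) of the integration-check pattern used
# above: load a checkpoint, run a deterministic input, and inspect the output shape.
if __name__ == "__main__":
    model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
    input_ids = torch.arange(256).unsqueeze(0)
    with torch.no_grad():
        output = model(input_ids)[0]
    print(output.shape)  # expected: torch.Size([1, 256, 768])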
| 82 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
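
# A short usage sketch (not in the original file): keep the cursor hidden while a loop
# prints progress; the `finally` in the context manager restores it even on exceptions.
if __name__ == "__main__":
    import time

    with hide():
        for i in range(3):
            print(f"working... {i}")
            time.sleep(0.1)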
| 250 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES['spm_file']).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'],
        )
@slow
def lowercase__ ( self : List[str] ):
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = {'''input_ids''': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'

    french_text = "C'est trop cool"
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 250 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : Tuple = Path(A_ ) / "preprocessor_config.json"
UpperCamelCase : List[str] = Path(A_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(A_ , "w" ) , )
json.dump({"model_type": "clip"} , open(A_ , "w" ) )
UpperCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : Tuple = Path(A_ ) / "preprocessor_config.json"
UpperCamelCase : Optional[int] = Path(A_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(A_ , "w" ) , )
json.dump({"model_type": "clip"} , open(A_ , "w" ) )
UpperCamelCase : int = AutoImageProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : List[Any] = CLIPConfig()
            # Create a dummy config file with image_processor_type
UpperCamelCase : Optional[int] = Path(A_ ) / "preprocessor_config.json"
UpperCamelCase : str = Path(A_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(A_ , "w" ) , )
json.dump({"model_type": "clip"} , open(A_ , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
UpperCamelCase : str = AutoImageProcessor.from_pretrained(A_ ).to_dict()
config_dict.pop("image_processor_type" )
UpperCamelCase : Optional[Any] = CLIPImageProcessor(**A_ )
# save in new folder
model_config.save_pretrained(A_ )
config.save_pretrained(A_ )
UpperCamelCase : int = AutoImageProcessor.from_pretrained(A_ )
# make sure private variable is not incorrectly saved
UpperCamelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : Union[str, Any] = Path(A_ ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(A_ , "w" ) , )
UpperCamelCase : Any = AutoImageProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A_ , "clip-base is not a local folder and is not a valid model identifier" ):
UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("clip-base" )
def __UpperCamelCase( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A_ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(A_ , revision="aaaaaa" )
def __UpperCamelCase( self ):
'''simple docstring'''
with self.assertRaisesRegex(
A_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCamelCase : Optional[int] = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def __UpperCamelCase( self ):
'''simple docstring'''
with self.assertRaises(A_ ):
UpperCamelCase : Tuple = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=A_ )
UpperCamelCase : Optional[int] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=A_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A_ )
UpperCamelCase : str = AutoImageProcessor.from_pretrained(A_ , trust_remote_code=A_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def __UpperCamelCase( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , A_ )
AutoImageProcessor.register(A_ , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoImageProcessor.register(A_ , A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : Tuple = Path(A_ ) / "preprocessor_config.json"
UpperCamelCase : str = Path(A_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(A_ , "w" ) , )
json.dump({"model_type": "clip"} , open(A_ , "w" ) )
UpperCamelCase : str = CustomImageProcessor.from_pretrained(A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A_ )
UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __UpperCamelCase( self ):
'''simple docstring'''
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register("custom" , A_ )
AutoImageProcessor.register(A_ , A_ )
# If remote code is not set, the default is to use local
UpperCamelCase : Optional[int] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=A_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=A_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(A_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
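
# A condensed sketch (assumed setup, mirroring the tests above) of how a custom image
# processor is registered with the auto classes and round-tripped through save/load:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = CustomImageProcessor()
#   processor.save_pretrained(tmp_dir)
#   reloaded = AutoImageProcessor.from_pretrained(tmp_dir)  # -> CustomImageProcessor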
| 629 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
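
# A quick sanity check (not in the original file): with 6 * key_len qubits and a 1/2
# chance per qubit that Alice's and Bob's bases agree, about 3 * key_len sifted bits
# are expected, so truncating to key_len almost always succeeds.
if __name__ == "__main__":
    assert len(bb84(8, seed=0)) == 8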
| 629 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = SpeechaTextTokenizer
snake_case_ = False
snake_case_ = True
def A_ ( self : Union[str, Any] ) ->Optional[Any]:
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[Any] = sp.SentencePieceProcessor()
spm_model.Load(a )
SCREAMING_SNAKE_CASE__ : Any = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(a ) )]
SCREAMING_SNAKE_CASE__ : List[str] = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE__ : str = Path(self.tmpdirname )
save_json(a , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a , save_dir / VOCAB_FILES_NAMES["spm_file"] )
SCREAMING_SNAKE_CASE__ : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[int] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict = "<pad>"
SCREAMING_SNAKE_CASE__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def A_ ( self : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(a ) , 10_01 )
def A_ ( self : List[Any] ) ->Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def A_ ( self : int ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [2_89, 50, 14, 1_74, 3_86] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
SCREAMING_SNAKE_CASE__ : str = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def A_ ( self : List[str] ) ->Tuple:
# fmt: off
SCREAMING_SNAKE_CASE__ : Tuple = {"input_ids": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__, model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad", )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE]) | 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests were collected; treat that as success in CI.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser | 26 | 0 |
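# Usage note (a minimal sketch, not part of this conftest): once doctest.OutputChecker is
# monkeypatched as above, any doctest line tagged with the registered flag passes even when
# its recorded output cannot match, e.g. for non-deterministic values.
import random


def sample_loss():
    """
    >>> sample_loss()  # doctest: +IGNORE_RESULT
    {'loss': 0.123}
    """
    # The recorded output above never matches exactly; IGNORE_RESULT makes the check pass.
    return {"loss": random.random()}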
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        # The class embeddings are the noise-augmented image embeddings: the image embeddings
        # concatenated with the noised embeddings of the same dimension, hence the `* 2`.
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9 | 8 |
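# For orientation, a minimal end-user sketch of the pipeline exercised above, outside the
# test harness (assumes a CUDA machine; the checkpoint and image are the ones from the tests):
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe.enable_sequential_cpu_offload()  # keeps peak VRAM low, as in the tests above

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, "anime turtle").images[0]  # positional (image, prompt), as in the tests
image.save("anime_turtle.png")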
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
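# A short usage sketch of this pipeline as an end user would invoke it; the CLIP checkpoint
# and image URL are illustrative:
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
)
print(preds[0]["label"])  # results come back sorted, highest-scoring label first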
| 490 | 0 |
"""simple docstring"""
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
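# A small illustrative sketch of how these feature types compose into a dataset schema
# (the column names are made up for the example):
from datasets import ClassLabel, Features, Sequence, Value

features = Features(
    {
        "text": Value("string"),
        "token_ids": Sequence(Value("int32")),
        "label": ClassLabel(names=["negative", "positive"]),
    }
)
print(features["label"].str2int("positive"))  # 1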
| 359 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode arbitrary audio bytes to mono float32 PCM at `sampling_rate` via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw microphone chunks of `chunk_length_s` seconds, captured through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks as dicts with `raw`, `stride` and `sampling_rate`."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into chunks of `chunk_len` that overlap by `stride` bytes."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper: spawn ffmpeg and yield its stdout in `buflen`-sized reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 359 | 1 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # pull an already-ordered "strand" out of the input
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
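# A hand trace of strand_sort([4, 3, 5, 1, 2]) under the default ascending order:
#   pass 1: strand [4, 5] is pulled out of the input       -> solution = [4, 5]
#   pass 2: strand [3] from the remaining [3, 1, 2], merged -> solution = [3, 4, 5]
#   pass 3: strand [1, 2] from [1, 2], merged               -> solution = [1, 2, 3, 4, 5]
print(strand_sort([4, 3, 5, 1, 2]))  # [1, 2, 3, 4, 5]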
| 608 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: exponential time, every cut position is tried independently."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue
def top_down_cut_rod(n: int, prices: list):
    """Memoized recursion: O(n^2) time, O(n) extra space for the memo table."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev)
            )
        max_rev[n] = max_revenue

    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Iterative dynamic programming: O(n^2) time, no recursion."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]
def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
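# All three implementations compute the same recurrence, r_n = max over 1 <= i <= n of
# (prices[i-1] + r_{n-i}). A quick check with the classic CLRS price table (my own
# example, not from this file):
clrs_prices = [1, 5, 8, 9, 10, 17, 17, 20]
# r_4 = max(1 + r_3, 5 + r_2, 8 + r_1, 9 + r_0) = max(1 + 8, 5 + 5, 8 + 1, 9 + 0) = 10
assert bottom_up_cut_rod(4, clrs_prices) == 10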
| 608 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 703 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 | 0 |
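# Usage note (a sketch of the consumer-side effect, assuming torch is installed): the
# _LazyModule indirection means the heavy submodules above are only imported on first
# attribute access, so `import transformers` itself stays cheap.
import transformers

tokenizer_cls = transformers.JukeboxTokenizer  # triggers the lazy import of tokenization_jukebox
print(tokenizer_cls.__module__)  # transformers.models.jukebox.tokenization_jukebox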