| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP-style image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or a batch of images into model-ready `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
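# Usage sketch (illustrative, not part of the original module; the checkpoint
# id is a real CLIP checkpoint, the image path is a placeholder):
#
#     from transformers import CLIPImageProcessor
#     from PIL import Image
#
#     processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")
#     # inputs["pixel_values"] -> tensor of shape (1, 3, 224, 224)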
| 421 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with flat string features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with nested sequence features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
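# Minimal demonstration of the dummy builder outside the test harness (an
# illustrative sketch; it assumes `apache_beam` is installed, which the tests
# above gate behind @require_beam):
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as demo_cache:
        demo_builder = DummyBeamDataset(cache_dir=demo_cache, beam_runner="DirectRunner")
        demo_builder.download_and_prepare()
        print(demo_builder.as_dataset()["train"][0])  # {'content': 'foo'}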
| 75 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
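# Usage sketch (the checkpoint URL is the script's real default; the script
# filename and output folder are illustrative assumptions):
#
#     python convert_dit_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base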
| 330 |
"""Convert Hubert checkpoint."""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
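# Usage sketch (illustrative; the script filename, checkpoint file, and dict
# path are placeholder assumptions for artifacts you must provide):
#
#     python convert_hubert_checkpoint.py \
#         --checkpoint_path ./hubert_ckpt.pt \
#         --dict_path ./dict.ltr.txt \
#         --pytorch_dump_folder_path ./hubert-converted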
| 330 | 1 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
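    # Sanity check (an illustrative sketch with fresh inputs m1, m2; note that
    # `strassen` pads its argument lists in place, hence the copies):
    #
    #     import copy
    #     expected = [
    #         [sum(m1[i][k] * m2[k][j] for k in range(len(m2))) for j in range(len(m2[0]))]
    #         for i in range(len(m1))
    #     ]
    #     assert strassen(copy.deepcopy(m1), copy.deepcopy(m2)) == expected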
| 170 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 170 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Looser tolerance because of float16 rounding
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 439 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 439 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 539 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
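# Usage sketch (illustrative; the checkpoint id is the one listed in the maps
# above):
#
#     tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     print(tokenizer("sample text")["input_ids"])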
| 539 | 1 |
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1: one slot per vertex, plus one for the return to start
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
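if __name__ == "__main__":
    # Illustrative demo (not part of the original module): an adjacency matrix
    # for a small undirected graph that contains a Hamiltonian cycle.
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # [0, 1, 2, 4, 3, 0]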
| 174 |
"""
A binary search Tree
"""
from __future__ import annotations

from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: Any = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        """Go as deep as possible on the right branch."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """Go as deep as possible on the left branch."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """Traverse the tree, in preorder by default; a custom traversal
        function over the root can be supplied instead."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an in-order traversal, appending the node values to arr."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree."""
        arr: list = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 174 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
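# With the `_LazyModule` indirection above, importing `transformers.models.roberta`
# stays cheap: the torch/TF/Flax submodules are only imported the first time one
# of the names registered in `_import_structure` is actually accessed.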
| 416 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
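# Usage sketch (illustrative; "bigscience/bloom-560m" is one of the checkpoints
# listed in the map above):
#
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     print(tokenizer("Hello world")["input_ids"])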
| 416 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FILE = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading directly from a config URL is legacy behavior kept for backward compatibility.
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 719 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput( BaseOutput ):
    """The denoised sample produced by the model's forward pass."""
    sample: torch.FloatTensor
class UpperCamelCase__( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , sample_size = 65536 , sample_rate = None , in_channels = 2 , out_channels = 2 , extra_in_channels = 0 , time_embedding_type = "fourier" , flip_sin_to_cos = True , use_timestep_embedding = False , freq_shift = 0.0 , down_block_types = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type = "UNetMidBlock1D" , out_block_type = None , block_out_channels = (32, 32, 64) , act_fn = None , norm_num_groups = 8 , layers_per_block = 1 , downsample_each_block = False , ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample , timestep , return_dict = True , ) -> Union[UNetaDOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample )
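# Smoke-test sketch (added; assumes the surrounding diffusers package context
# so the relative imports above resolve):
#
#   model = UpperCamelCase__()              # defaults: 2 channels, Fourier time proj
#   sample = torch.randn(1, 2, 65536)
#   out = model(sample, timestep=1)
#   print(out.sample.shape)                 # expected to match the input shape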
| 562 | 0 |
from math import factorial
def solution(n = 20 ):
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
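# Worked example (added): solution(n) is the central binomial coefficient
# C(2n, n). For n = 1 it gives C(2, 1) = 2, for n = 2 it gives C(4, 2) = 6,
# and solution(20) = C(40, 20) = 137846528820, the number of lattice paths
# through a 20x20 grid (Project Euler 15).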
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 39 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "clusters" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_image_processor_to_json_string( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_image_processor_to_json_file( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "image_processor.json" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_second[key] , value )
    def test_image_processor_from_and_save_pretrained( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_second[key] , value )
    @unittest.skip("ImageGPT requires clusters at initialization" )
    def test_init_without_params( self ):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
    image1 = Image.open(dataset[4]["file"] )
    image2 = Image.open(dataset[5]["file"] )
    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest( unittest.TestCase ):
    @slow
    def test_image( self ):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
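def _nearest_cluster_ids(pixels, clusters):
    """Added illustration (hypothetical helper, not part of the original tests):
    ImageGPT-style color quantization maps each row of an (n, 3) pixel array to
    the index of its nearest (k, 3) cluster centroid, which is what produces the
    integer input_ids asserted above."""
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)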
| 79 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__( self ):
        self.connections = {}
    def add_node( self , node ):
        self.connections[node] = {}
    def add_transition_probability( self , node1 , node2 , probability ):
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes( self ):
        return list(self.connections )
    def transition( self , node ):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def _UpperCAmelCase( start , transitions , number_of_steps ):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(number_of_steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
    import doctest
    doctest.testmod()
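# Worked example (added; uses the names defined above): a deterministic
# two-state chain walked for ten steps.
#
#   transitions = [("a", "b", 1.0), ("b", "a", 1.0)]
#   visited = _UpperCAmelCase("a", transitions, 10)
#   # The Counter starts every node at 1, then the walk alternates
#   # a -> b -> a -> ..., so visited == Counter({"a": 6, "b": 6}).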
| 704 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
    fire.Fire(convert)
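# Example invocation (added; the script and file names are placeholders):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin
# fire maps positional and --flag arguments onto convert()'s parameters, so
# omitting --save_path overwrites the source checkpoint in place.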
| 430 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 57 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( lowerCamelCase : str , lowerCamelCase : list[str] | None = None , lowerCamelCase : dict[str, float] | None = None , lowerCamelCase : bool = False , ) -> tuple[int, float, str]:
lowerCAmelCase__ : List[Any] = cipher_alphabet or [chr(lowerCamelCase ) for i in range(9_7 , 1_2_3 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCAmelCase__ : str = {
"a": 0.0_84_97,
"b": 0.0_14_92,
"c": 0.0_22_02,
"d": 0.0_42_53,
"e": 0.1_11_62,
"f": 0.0_22_28,
"g": 0.0_20_15,
"h": 0.0_60_94,
"i": 0.0_75_46,
"j": 0.0_01_53,
"k": 0.0_12_92,
"l": 0.0_40_25,
"m": 0.0_24_06,
"n": 0.0_67_49,
"o": 0.0_75_07,
"p": 0.0_19_29,
"q": 0.0_00_95,
"r": 0.0_75_87,
"s": 0.0_63_27,
"t": 0.0_93_56,
"u": 0.0_27_58,
"v": 0.0_09_78,
"w": 0.0_25_60,
"x": 0.0_01_50,
"y": 0.0_19_94,
"z": 0.0_00_77,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
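# Usage sketch (added): "why is the caesar cipher so popular? it is too easy
# to crack!" shifted by 7 becomes "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz
# avv lhzf av jyhjr!", and
#   lowercase("dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!")
# returns a (shift, chi_squared, plaintext) triple that recovers the original
# sentence, because English letter frequencies minimize the chi-squared score.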
| 308 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly , x ):
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly , x ):
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
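# Added note: horner() needs only n multiplications and n additions, while
# evaluate_poly() recomputes x**i for every term. Both agree; for the sample
# below, poly = (0.0, 0.0, 5.0, 9.3, 7.0) at x = 10.0 gives
# 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000 = 79800.0.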
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 265 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class _SCREAMING_SNAKE_CASE( TaskTemplate ):
    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
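# Usage sketch (added; follows the class above): aligning the template with a
# dataset's features replaces the bare ClassLabel placeholder in label_schema
# with the dataset's concrete label feature.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = _SCREAMING_SNAKE_CASE()
#   aligned = template.align_with_features(features)
#   # aligned.label_schema["labels"] is now ClassLabel(names=["cat", "dog"])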
| 265 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowercase( Dataset ):
    """A line-by-line seq2seq dataset reading `<type_path>.source`/`<type_path>.target` files."""
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + ".source" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + ".target" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self ):
        return len(self.src_lens )
    def __getitem__( self , index ):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("\n" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("\n" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , "right" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , "right" )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        input_ids = torch.stack([x["input_ids"] for x in batch] )
        masks = torch.stack([x["attention_mask"] for x in batch] )
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list( summary_ids ):
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f , x ):
    return list(map(f , x ) )
def pickle_save( obj , path ):
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    def remove_articles(text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    return model_prefix.startswith("rag" )
def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
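# Worked example (added): token-level F1 after normalize_answer() strips
# articles and punctuation.
#
#   f1_score("big cat sat", "big cat")
#   # 2 shared tokens -> precision 2/3, recall 2/2, F1 = 2*(2/3)/(2/3 + 1) = 0.8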
| 101 |
def fibonacci( n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n = 1_0_0_0 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
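# Added note: this is Project Euler problem 25. fibonacci_digits_index(3) == 12,
# since F(12) = 144 is the first three-digit term, and solution() returns 4782,
# the index of the first 1000-digit Fibonacci number.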
| 377 | 0 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str ) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url ).json()
def hackernews_top_stories(max_stories: int = 10 ) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10 ) -> str:
    stories = hackernews_top_stories(max_stories )
    return "\n".join("* [{title}]({url})".format(**story ) for story in stories )
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
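# Usage sketch (added): hackernews_top_stories_as_markdown(5) returns five
# markdown bullet lines of the form "* [title](url)", one per top story.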
| 529 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model , tf_checkpoint_path , config ):
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/" )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}" )
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}" )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights" ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append("/".join(name ) )
        arrays.append(array )
    logger.info(f"Read a total of {len(arrays ):,} layers" )
    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth ) )})" )
    layer_depth = list(set(layer_depth ) )[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads." )
    # convert layers
    logger.info("Converting weights..." )
    for full_name, array in zip(names , arrays ):
        name = full_name.split("/" )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights" ):
                layer_num = int(m_name.split("-" )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"] )
                    pointer = getattr(pointer , "embeddings" )
                    pointer = getattr(pointer , "LayerNorm" )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4 )] )
                    pointer = getattr(pointer , "encoder" )
                    pointer = getattr(pointer , "layer" )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"] )
                    pointer = getattr(pointer , "pooler" )
                    pointer = getattr(pointer , "dense" )
            elif m_name == "embeddings":
                trace.append("embeddings" )
                pointer = getattr(pointer , "embeddings" )
                if layer_num == 0:
                    trace.append("word_embeddings" )
                    pointer = getattr(pointer , "word_embeddings" )
                elif layer_num == 1:
                    trace.append("position_embeddings" )
                    pointer = getattr(pointer , "position_embeddings" )
                elif layer_num == 2:
                    trace.append("token_type_embeddings" )
                    pointer = getattr(pointer , "token_type_embeddings" )
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}" )
                trace.append("weight" )
                pointer = getattr(pointer , "weight" )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"] )
                pointer = getattr(pointer , "attention" )
                pointer = getattr(pointer , "self" )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"] )
                pointer = getattr(pointer , "attention" )
                pointer = getattr(pointer , "output" )
                pointer = getattr(pointer , "LayerNorm" )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"] )
                pointer = getattr(pointer , "attention" )
                pointer = getattr(pointer , "output" )
                pointer = getattr(pointer , "dense" )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"] )
                pointer = getattr(pointer , "output" )
                pointer = getattr(pointer , "dense" )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"] )
                pointer = getattr(pointer , "output" )
                pointer = getattr(pointer , "LayerNorm" )
            elif m_name == "_key_dense":
                # attention key
                trace.append("key" )
                pointer = getattr(pointer , "key" )
            elif m_name == "_query_dense":
                # attention query
                trace.append("query" )
                pointer = getattr(pointer , "query" )
            elif m_name == "_value_dense":
                # attention value
                trace.append("value" )
                pointer = getattr(pointer , "value" )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"] )
                pointer = getattr(pointer , "intermediate" )
                pointer = getattr(pointer , "dense" )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias" )
                pointer = getattr(pointer , "bias" )
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight" )
                pointer = getattr(pointer , "weight" )
            else:
                logger.warning(f"Ignored {m_name}" )
        # for certain layers reshape is necessary
        trace = ".".join(trace )
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , trace ) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight" , trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}" )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path , config_path , pytorch_dump_path ):
    logger.info(f"Loading model based on config from {config_path}..." )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
    load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model (must include filename).',
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
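# Example invocation (added; the script name and paths are placeholders):
#   python convert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
#       --bert_config_file ./tf2_ckpt/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin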
| 529 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
    def test_save_load_fast_init_from_base( self ):
        pass
    @parameterized.expand([("linear",), ("dynamic",)] )
    def test_model_rope_scaling( self , scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
| 48 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"&page={i + 2}" , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
def get_artifacts_links( workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"&page={i + 2}" , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
def download_artifact( artifact_name , artifact_url , output_dir , token ):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , f"{artifact_name}.zip" )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact( artifact_zip_path , job_links=None ):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` "
            f"and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Dict:
a = []
a = [os.path.join(__lowerCamelCase , __lowerCamelCase ) for p in os.listdir(__lowerCamelCase ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowerCamelCase , job_links=__lowerCamelCase ) )
return errors
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Tuple:
a = Counter()
counter.update([x[1] for x in logs] )
a = counter.most_common()
a = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
a = dict(sorted(r.items() , key=lambda __lowerCamelCase : item[1]["count"] , reverse=__lowerCamelCase ) )
return r
def __A ( __lowerCamelCase ) -> List[str]:
a = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
a = test.split("""/""" )[2]
else:
a = None
return test
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Any:
a = [(x[0], x[1], get_model(x[2] )) for x in logs]
a = [x for x in logs if x[2] is not None]
a = {x[2] for x in logs}
a = {}
for test in tests:
a = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a = counter.most_common()
a = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a = sum(error_counts.values() )
if n_errors > 0:
a = {"""count""": n_errors, """errors""": error_counts}
a = dict(sorted(r.items() , key=lambda __lowerCamelCase : item[1]["count"] , reverse=__lowerCamelCase ) )
return r
def __A ( __lowerCamelCase ) -> Optional[int]:
a = """| no. | error | status |"""
a = """|-:|:-|:-|"""
a = [header, sep]
for error in reduced_by_error:
a = reduced_by_error[error]["""count"""]
a = f'| {count} | {error[:100]} | |'
lines.append(__lowerCamelCase )
return "\n".join(__lowerCamelCase )
def __A ( __lowerCamelCase ) -> int:
a = """| model | no. of errors | major error | count |"""
a = """|-:|-:|-:|-:|"""
a = [header, sep]
for model in reduced_by_model:
a = reduced_by_model[model]["""count"""]
a , a = list(reduced_by_model[model]["""errors"""].items() )[0]
a = f'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(__lowerCamelCase )
return "\n".join(__lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__UpperCamelCase : Any = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__UpperCamelCase : Optional[Any] = get_job_links(args.workflow_run_id, token=args.token)
__UpperCamelCase : str = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__UpperCamelCase : List[str] = k.find(" / ")
__UpperCamelCase : List[Any] = k[index + len(" / ") :]
__UpperCamelCase : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCamelCase : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCamelCase : int = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCamelCase : Optional[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCamelCase : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCamelCase : Union[str, Any] = reduce_by_error(errors)
__UpperCamelCase : Dict = reduce_by_model(errors)
__UpperCamelCase : Union[str, Any] = make_github_table(reduced_by_error)
__UpperCamelCase : Union[str, Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
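
# Example invocation (run id and paths are placeholders; assumes this script is saved as
# `get_ci_error_statistics.py`):
#   python get_ci_error_statistics.py --workflow_run_id 123456789 \
#       --output_dir ci_reports --token "$GITHUB_TOKEN"
# It writes job_links.json, artifacts.json, errors.json and the two markdown tables to ci_reports/.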
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
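
# Why `_map_to_encoder_decoder_inputs` replaces pad tokens with -100: PyTorch's CrossEntropyLoss
# uses ignore_index=-100 by default, so padded label positions never contribute to the loss.
# Minimal self-contained check (illustrative, not part of the test suite):
def _demo_ignore_index():
    import torch

    loss_fct = torch.nn.CrossEntropyLoss()  # ignore_index defaults to -100
    logits = torch.randn(3, 10)
    labels = torch.tensor([1, -100, 4])  # the middle position is masked out of the loss
    return loss_fct(logits, labels)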
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
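
    # Note: the (256 / 224) factor in `resize` reproduces LeViT's evaluation recipe of resizing the
    # shorter side to ~1.14x the crop size before center-cropping, mirroring the resize-then-crop
    # pipeline the checkpoints were trained with.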
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean, std, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
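
# After running this script the freshly initialized checkpoint can be reloaded with
# `AutoModelForCausalLM.from_pretrained(<--model_name value>)`; the name below is only an example:
#   model = AutoModelForCausalLM.from_pretrained("codeparrot/codeparrot")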
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    '''simple docstring'''

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self, dataset_name, config, version, cache_dir=None, use_local_dummy_data=False, load_existing_dummy_data=True, download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
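
# Rough usage sketch (names and URL are placeholders; API as defined above): dataset tests build a
# mock manager so that `download_and_extract` resolves URLs to files packed inside dummy_data.zip
# instead of hitting the network:
#   dl_manager = MockDownloadManager(dataset_name="squad", config=None, version=Version("1.0.0"))
#   local_path = dl_manager.download_and_extract("https://example.com/train.json")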
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
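
# Minimal usage sketch for the classes above (`model` and `input_ids` are placeholders):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)
# Generation stops as soon as any criterion in the list returns True.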
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128,
        has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128,
        rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True,
        text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size,
            hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence classification and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
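
# Sketch of how the ONNX config above is typically exercised (checkpoint, task, and the surrounding
# imports are assumptions for illustration):
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)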
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
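
# Example of the helpers above (shapes and vocab size are arbitrary): a (2, 8) batch of random
# token ids and a matching random attention mask whose last position is always attended, which is
# what the mixin below expects.
#   dummy_input_ids = ids_tensor((2, 8), vocab_size=99)
#   dummy_attention_mask = random_attention_mask((2, 8))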
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def lowerCamelCase__ ( self : str ) -> str:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: Union[str, Any] = self._get_input_ids_and_config()
__magic_name__: Optional[Any] = False
__magic_name__: List[Any] = max_length
__magic_name__: Optional[Any] = 0
for model_class in self.all_generative_model_classes:
__magic_name__: Optional[Any] = model_class(__snake_case )
__magic_name__: Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
__magic_name__: Any = getattr(__snake_case , __snake_case )
__magic_name__: List[Any] = pt_model_class(__snake_case ).eval()
__magic_name__: Tuple = load_flax_weights_in_pytorch_model(__snake_case , flax_model.params )
__magic_name__: Any = flax_model.generate(__snake_case ).sequences
__magic_name__: int = pt_model.generate(torch.tensor(__snake_case , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__magic_name__: Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: Dict = self._get_input_ids_and_config()
__magic_name__: List[Any] = False
__magic_name__: str = max_length
for model_class in self.all_generative_model_classes:
__magic_name__: Any = model_class(__snake_case )
__magic_name__: Optional[int] = model.generate(__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: Union[str, Any] = jit(model.generate )
__magic_name__: int = jit_generate(__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: Optional[int] = self._get_input_ids_and_config()
__magic_name__: Union[str, Any] = True
__magic_name__: Any = max_length
for model_class in self.all_generative_model_classes:
__magic_name__: Any = model_class(__snake_case )
__magic_name__: Dict = model.generate(__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: Optional[Any] = jit(model.generate )
__magic_name__: Tuple = jit_generate(__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: str = self._get_input_ids_and_config()
__magic_name__: Tuple = False
__magic_name__: str = max_length
__magic_name__: List[str] = 2
for model_class in self.all_generative_model_classes:
__magic_name__: Optional[Any] = model_class(__snake_case )
__magic_name__: Dict = model.generate(__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: Dict = jit(model.generate )
__magic_name__: List[Any] = jit_generate(__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : str ) -> List[str]:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: List[str] = self._get_input_ids_and_config()
__magic_name__: List[Any] = False
__magic_name__: int = max_length
__magic_name__: Tuple = 2
__magic_name__: Optional[int] = 2
for model_class in self.all_generative_model_classes:
__magic_name__: int = model_class(__snake_case )
__magic_name__: Any = model.generate(__snake_case ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: Tuple = self._get_input_ids_and_config()
__magic_name__: Any = True
__magic_name__: Optional[int] = max_length
__magic_name__: int = 0.8
__magic_name__: Any = 1_0
__magic_name__: Dict = 0.3
__magic_name__: List[Any] = 1
__magic_name__: Any = 8
__magic_name__: List[str] = 9
for model_class in self.all_generative_model_classes:
__magic_name__: Optional[int] = model_class(__snake_case )
__magic_name__: Optional[Any] = model.generate(__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: Dict = jit(model.generate )
__magic_name__: int = jit_generate(__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : str ) -> str:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: Any = self._get_input_ids_and_config()
__magic_name__: Optional[Any] = max_length
__magic_name__: str = 1
__magic_name__: Tuple = 8
__magic_name__: str = 9
for model_class in self.all_generative_model_classes:
__magic_name__: Any = model_class(__snake_case )
__magic_name__: Dict = model.generate(__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: str = jit(model.generate )
__magic_name__: Tuple = jit_generate(__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : List[str] ) -> str:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: List[str] = self._get_input_ids_and_config()
__magic_name__: Optional[int] = max_length
__magic_name__: List[str] = 2
__magic_name__: List[Any] = 1
__magic_name__: Dict = 8
__magic_name__: Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
__magic_name__: Any = model_class(__snake_case )
__magic_name__: List[Any] = model.generate(__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: Tuple = jit(model.generate )
__magic_name__: Union[str, Any] = jit_generate(__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: Dict = self._get_input_ids_and_config()
# pad attention mask on the left
__magic_name__: int = attention_mask.at[(0, 0)].set(0 )
__magic_name__: int = False
__magic_name__: Tuple = max_length
for model_class in self.all_generative_model_classes:
__magic_name__: Tuple = model_class(__snake_case )
__magic_name__: Any = model.generate(__snake_case , attention_mask=__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: Dict = jit(model.generate )
__magic_name__: List[Any] = jit_generate(__snake_case , attention_mask=__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
__magic_name__: str = attention_mask.at[(0, 0)].set(0 )
__magic_name__: Dict = True
__magic_name__: List[str] = max_length
for model_class in self.all_generative_model_classes:
__magic_name__: Union[str, Any] = model_class(__snake_case )
__magic_name__: Optional[Any] = model.generate(__snake_case , attention_mask=__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: int = jit(model.generate )
__magic_name__: int = jit_generate(__snake_case , attention_mask=__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
__magic_name__, __magic_name__, __magic_name__, __magic_name__: List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
__magic_name__: Optional[int] = attention_mask.at[(0, 0)].set(0 )
__magic_name__: int = 2
__magic_name__: str = max_length
for model_class in self.all_generative_model_classes:
__magic_name__: Union[str, Any] = model_class(__snake_case )
__magic_name__: List[str] = model.generate(__snake_case , attention_mask=__snake_case ).sequences
self.assertEqual(generation_outputs.shape[-1] , __snake_case )
__magic_name__: Optional[int] = jit(model.generate )
__magic_name__: str = jit_generate(__snake_case , attention_mask=__snake_case ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate the image: compute the affine transform mapping `pt1` onto `pt2` and apply it."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
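
# Note on the transform: cv2.getAffineTransform solves for the 2x3 matrix M that maps the three
# source points onto the three destination points, and cv2.warpAffine then applies
# [x', y']^T = M @ [x, y, 1]^T to every pixel, so each point triple above defines one "rotation".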
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
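
# Usage sketch (column names are examples; `prepare_for_task` is the datasets entry point that
# consumes task templates like the one above):
#   task = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
#   dataset = dataset.prepare_for_task(task)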
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class a :
def __init__( self : Dict ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(lowercase_ )
def A_ ( cls : List[Any] , lowercase_ : Optional[int] , **lowercase_ : Dict ):
snake_case_ = kwargs.pop('''config''' , lowercase_ )
snake_case_ = kwargs.pop('''trust_remote_code''' , lowercase_ )
snake_case_ = True
snake_case_ ,snake_case_ = FeatureExtractionMixin.get_feature_extractor_dict(lowercase_ , **lowercase_ )
snake_case_ = config_dict.get('''feature_extractor_type''' , lowercase_ )
snake_case_ = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
snake_case_ = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowercase_ , lowercase_ ):
snake_case_ = AutoConfig.from_pretrained(lowercase_ , **lowercase_ )
# It could be in `config.feature_extractor_type``
snake_case_ = getattr(lowercase_ , '''feature_extractor_type''' , lowercase_ )
if hasattr(lowercase_ , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
snake_case_ = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
snake_case_ = feature_extractor_class_from_name(lowercase_ )
snake_case_ = feature_extractor_auto_map is not None
snake_case_ = feature_extractor_class is not None or type(lowercase_ ) in FEATURE_EXTRACTOR_MAPPING
snake_case_ = resolve_trust_remote_code(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if has_remote_code and trust_remote_code:
snake_case_ = get_class_from_dynamic_module(
lowercase_ , lowercase_ , **lowercase_ )
snake_case_ = kwargs.pop('''code_revision''' , lowercase_ )
if os.path.isdir(lowercase_ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowercase_ , **lowercase_ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowercase_ , **lowercase_ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowercase_ ) in FEATURE_EXTRACTOR_MAPPING:
snake_case_ = FEATURE_EXTRACTOR_MAPPING[type(lowercase_ )]
return feature_extractor_class.from_dict(lowercase_ , **lowercase_ )
raise ValueError(
F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Registers a new feature extractor class for the given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
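
if __name__ == "__main__":
    # Hedged usage sketch: the checkpoint name is illustrative, and the call needs
    # network access to the Hub the first time it runs.
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    print(type(feature_extractor).__name__)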
| 593 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Mutable scheduler state carried between sampling steps."""

    # setable values
    num_inference_steps: Optional[int] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)
    timesteps: Optional[jnp.ndarray] = None

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler step: the previous sample, its derivative, and the updated state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022) for variance-expanding diffusion models."""

    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ):
        # all arguments are stored on `self.config` by the `register_to_config` decorator
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        """Sets the discrete timesteps and the sigma schedule used for the diffusion chain."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, )
    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: jnp.ndarray, ) -> Tuple[jnp.ndarray, float]:
        """Explicit Langevin-like "churn" step: adds noise to the sample and lifts sigma to sigma_hat."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
"""simple docstring"""
UpperCamelCase__ : str = sample_hat + sigma_hat * model_output
UpperCamelCase__ : Tuple = (sample_hat - pred_original_sample) / sigma_hat
UpperCamelCase__ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__SCREAMING_SNAKE_CASE , derivative=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True, ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order correction to the sample produced by `step`."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
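
if __name__ == "__main__":
    # Minimal sketch of the state-driven API above; a real sampling loop would also
    # call `add_noise_to_input`, `step`, and `step_correct` with a trained model.
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    print(state.timesteps)  # reversed step indices: [9, 8, ..., 0]
    print(state.schedule)   # sigma schedule derived from sigma_min/sigma_max in the config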
| 285 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16_000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 285 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
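
if __name__ == "__main__":
    # Minimal sketch: constructing the deprecated class still works but emits a
    # FutureWarning pointing at YolosImageProcessor.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        YolosFeatureExtractor()
        print(caught[0].category.__name__)  # FutureWarning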
| 332 |
from math import pow, sqrt
def validate(*values: float) -> bool:
    # All inputs must be positive for the formulas below to be physically meaningful.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
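
if __name__ == "__main__":
    # Illustrative values (hydrogen vs. helium, molar masses in g/mol); note that the
    # helpers return a ValueError *instance* rather than raising it on bad input.
    print(effusion_ratio(2.016, 4.002))            # ~1.409: H2 effuses ~1.4x faster than He
    print(first_effusion_rate(1.0, 2.016, 4.002))  # rate of gas 1 given a unit rate for gas 2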
| 332 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""")
            in_proj_bias = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim:, :]
            state_dict[f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 0 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 713 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        ".split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n    run_eval_search.py\n    {model}\n    {str(input_file_name)}\n    {str(output_file_name)}\n    --score_path {score_path}\n    --reference_path {reference_path}\n    --task {task}\n".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    # 1 / R_total = 1/R_1 + 1/R_2 + ... + 1/R_n
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # R_total = R_1 + R_2 + ... + R_n
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
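    # Illustrative check (values are arbitrary): two 4-ohm resistors give 2 ohms in
    # parallel and 8 ohms in series.
    print(resistor_parallel([4.0, 4.0]))  # 2.0
    print(resistor_series([4.0, 4.0]))    # 8.0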
| 51 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}; you can make your own by adding to this"
            " function.")

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 262 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
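
if __name__ == "__main__":
    # Minimal sketch: the default template maps dataset columns straight through.
    template = QuestionAnsweringExtractive()
    print(template.task)
    print(template.column_mapping)  # {'question': 'question', 'context': 'context', 'answers': 'answers'}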
| 704 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
UpperCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
UpperCamelCase = vocab_file
UpperCamelCase = monolingual_vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCamelCase = {}
UpperCamelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase__ ) not in self.fairseq_tokens_to_ids:
UpperCamelCase = cnt
cnt += 1
with open(lowerCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
UpperCamelCase = line.strip().split()[0]
UpperCamelCase = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase__ ) not in self.fairseq_tokens_to_ids:
UpperCamelCase = len(self.fairseq_tokens_to_ids )
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
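
if __name__ == "__main__":
    # Hedged usage sketch: downloads the vocab files referenced in the maps above on
    # first run; the sample sentence is illustrative.
    tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
    print(tokenizer.tokenize("Chúng tôi là những nghiên cứu viên."))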
| 350 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 325 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
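    # With the module object swapped for a _LazyModule, an import such as
    # `from transformers.models.gpt_neo import GPTNeoModel` only triggers the heavy
    # torch-backed import on first attribute access.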
| 84 | 0 |
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 505 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model."""

    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
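
if __name__ == "__main__":
    # Minimal sketch: the defaults above reproduce the van-base layout.
    config = VanConfig()
    print(config.hidden_sizes)  # [64, 128, 320, 512]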
| 505 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # compare two TensorProtos while ignoring their (possibly different) names
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase : Dict = list(model.graph.initializer )
_lowercase : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_lowercase : Dict = inits[i].name
_lowercase : str = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowercase_ , lowercase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
_lowercase : Tuple = os.path.dirname(lowercase_ )
_lowercase : Any = os.path.basename(lowercase_ )
_lowercase : List[Any] = onnx.load(os.path.join(lowercase_ , lowercase_ ) )
_lowercase : Tuple = list(model.graph.initializer )
_lowercase : Tuple = set()
_lowercase : int = {}
_lowercase : List[Any] = []
_lowercase : int = 0
for i in range(len(lowercase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowercase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowercase_ )
dup_set.add(lowercase_ )
_lowercase : List[str] = inits[j].data_type
_lowercase : List[Any] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , lowercase_ )
total_reduced_size += mem_size
_lowercase : Tuple = inits[i].name
_lowercase : int = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowercase_ )
else:
_lowercase : Any = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
_lowercase : Dict = sorted(lowercase_ )
_remove_dup_initializers_from_model(lowercase_ , lowercase_ , lowercase_ )
_lowercase : Optional[int] = 'optimized_' + model_file_name
_lowercase : Union[str, Any] = os.path.join(lowercase_ , lowercase_ )
onnx.save(lowercase_ , lowercase_ )
return new_model
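A minimal usage sketch for the helper above; the input filename is a hypothetical placeholder:

if __name__ == "__main__":
    # Deduplicate shared initializers and write "optimized_<name>.onnx" next to the input.
    optimized_path = remove_dup_initializers("model.onnx")  # hypothetical local file
    print("Optimized model written to:", optimized_path)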
| 89 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 72 | 0 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 328 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 328 | 1 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__lowerCAmelCase = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__lowerCAmelCase = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 147 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Möbius function: 1 if n is square-free with an even number of prime factors,
    -1 if square-free with an odd number, and 0 if n is not square-free."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
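A quick sanity check against the first values of the Möbius sequence (1, -1, -1, 0, -1, 1, ...):

if __name__ == "__main__":
    # mobius(4) is 0 because 4 = 2 * 2 is not square-free.
    print([mobius(n) for n in range(1, 7)])  # expected: [1, -1, -1, 0, -1, 1]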
| 384 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 207 |
def optimal_merge_pattern(files: list) -> float:
    """Returns the minimum total cost of merging all files into a single file."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
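A short worked example: for file sizes [2, 3, 4], merging 2 and 3 costs 5, then merging 5 and 4 costs 9, for a total of 14.

if __name__ == "__main__":
    print(optimal_merge_pattern([2, 3, 4]))  # expected output: 14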
| 207 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
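A minimal usage sketch; the column names "article" and "highlights" are illustrative:

if __name__ == "__main__":
    # Map a dataset whose columns are named differently onto the canonical schema.
    task = Summarization(text_column="article", summary_column="highlights")
    print(task.column_mapping)  # {'article': 'text', 'highlights': 'summary'}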
| 48 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
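A minimal usage sketch for the tokenizer above; it assumes the sentencepiece model can be downloaded from the Hugging Face Hub:

if __name__ == "__main__":
    tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
    batch = tokenizer("Hello world", return_tensors="pt")
    # input_ids begin with the en_XX language code and end with the </s> token.
    print(batch["input_ids"])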
| 48 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> List[Any]:
import requests
monkeypatch.setattr(__lowerCAmelCase , """request""" , __lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = URL
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : int = url
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : Tuple = [url]
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : List[Any] = {"""train""": url}
__UpperCamelCase : str = """dummy"""
__UpperCamelCase : int = """downloads"""
__UpperCamelCase : List[Any] = tmp_path
__UpperCamelCase : str = DownloadConfig(
cache_dir=os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , use_etag=__lowerCAmelCase , )
__UpperCamelCase : List[Any] = DownloadManager(dataset_name=__lowerCAmelCase , download_config=__lowerCAmelCase )
__UpperCamelCase : Tuple = dl_manager.download(__lowerCAmelCase )
__UpperCamelCase : Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : Tuple = [downloaded_paths]
__UpperCamelCase : int = [urls]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
__UpperCamelCase : Dict = downloaded_paths.values()
__UpperCamelCase : Tuple = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__UpperCamelCase : Tuple = Path(__lowerCAmelCase )
__UpperCamelCase : Optional[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__UpperCamelCase : Dict = downloaded_path.read_text()
assert content == CONTENT
__UpperCamelCase : Optional[Any] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__UpperCamelCase : List[Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ) -> Tuple:
__UpperCamelCase : Optional[int] = str(__lowerCAmelCase )
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : List[str] = filename
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : List[str] = [filename]
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : List[str] = {"""train""": filename}
__UpperCamelCase : Optional[Any] = """dummy"""
__UpperCamelCase : Tuple = xz_file.parent
__UpperCamelCase : Optional[int] = """extracted"""
__UpperCamelCase : Dict = DownloadConfig(
cache_dir=__lowerCAmelCase , use_etag=__lowerCAmelCase , )
__UpperCamelCase : List[str] = DownloadManager(dataset_name=__lowerCAmelCase , download_config=__lowerCAmelCase )
__UpperCamelCase : int = dl_manager.extract(__lowerCAmelCase )
__UpperCamelCase : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__UpperCamelCase : Tuple = [extracted_paths]
__UpperCamelCase : List[str] = [paths]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
assert "train" in extracted_paths.keys()
__UpperCamelCase : Tuple = extracted_paths.values()
__UpperCamelCase : Any = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCAmelCase , __lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__UpperCamelCase : Dict = Path(__lowerCAmelCase )
__UpperCamelCase : Dict = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCAmelCase , etag=__lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__UpperCamelCase : int = extracted_path.read_text()
__UpperCamelCase : Tuple = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 716 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 515 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 10_00,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 10_00,
"block_out_channels": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 2_56,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 2_01,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 1_51,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def strabool(v):
    """Parse a string as a boolean (argparse helper)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map an openai consistency-model resnet block onto the diffusers ResnetBlock2D keys
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Split the fused qkv projection and map onto the diffusers attention keys
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
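A hypothetical invocation; the script name is a placeholder, and the checkpoint filename is chosen so the heuristics above select the imagenet64 U-Net and the CD scheduler configs:

# python convert_consistency_to_diffusers.py \
#     --unet_path ./cd_imagenet64_l2.pt \
#     --dump_path ./consistency-model-out \
#     --class_cond True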
| 205 |
def combination_util(arr, n, r, index, data, i):
    """Recursively builds combinations of size r in data[] and prints each one."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 205 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def A_ ( __lowercase ):
if len(__A ) < MIN_NUM_TOKENS:
return None
UpperCamelCase_ : Union[str, Any] =MinHash(num_perm=__A )
for token in set(__A ):
min_hash.update(token.encode() )
return min_hash
def A_ ( __lowercase ):
return {t for t in NON_ALPHA.split(__A ) if len(t.strip() ) > 0}
class a__ :
def __init__( self :Tuple , *,
_lowerCamelCase :float = 0.85 , ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] =duplication_jaccard_threshold
UpperCamelCase_ : List[str] =NUM_PERM
UpperCamelCase_ : Any =MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
UpperCamelCase_ : Tuple =defaultdict(_UpperCamelCase )
def lowerCamelCase_ ( self :List[Any] , _lowerCamelCase :Tuple , _lowerCamelCase :MinHash ):
'''simple docstring'''
UpperCamelCase_ : List[str] =self._index.query(_UpperCamelCase )
if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Export the duplicate clusters as a list of lists of dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Compute a MinHash per file, then feed them into a DuplicationIndex to build clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1, code2):
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Deduplicate the dataset, keeping one "extreme" file per cluster of near-duplicates."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
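# Usage sketch (not part of the original script): deduplicate a tiny in-memory dataset.
# The toy rows below are made up, but the column names ("content", "repo_name", "path")
# are the ones the functions above expect.
if __name__ == "__main__":
    from datasets import Dataset

    toy = Dataset.from_dict(
        {
            "content": ["def f(): pass", "def f(): pass", "print('hi')"],
            "repo_name": ["repo1", "repo2", "repo3"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(filtered), "files kept;", len(clusters), "duplicate cluster(s)")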
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
from math import factorial
def solution(num: int = 100) -> int:
    """Sum the digits of num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
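# With the default argument, factorial(100) has 158 digits whose sum is 648 -- the
# answer to Project Euler problem 20 -- so solution() == 648.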
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
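# Complex roots are returned as complex numbers: quadratic_roots(a=1, b=0, c=1) gives
# (1j, -1j), while purely real roots are narrowed to floats, e.g.
# quadratic_roots(a=1, b=-3, c=2) gives (2.0, 1.0).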
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
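# Note: _LazyModule defers the heavy torch/vision imports until one of the exported
# names is first accessed, so `import transformers.models.vivit` stays cheap.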
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init-weights function, kept for compatibility with the library.
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
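# Usage sketch (illustrative; "resnet18" is an assumed timm model name, and the config
# fields mirror the checks in __init__ above):
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     outputs = backbone(torch.randn(1, 3, 224, 224))
#     feature_maps = outputs.feature_maps  # one tensor per entry in config.out_indices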
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the module tree to the leaf module that should receive this weight.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    # Every supported weight_type ("weight", "weight_g", "weight_v", "bias", the LSTM
    # parameters and the batch-norm statistics) is an attribute of the module, so one
    # getattr covers the whole case analysis without changing behavior.
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
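# Example invocation (illustrative; the script filename is assumed):
#
#     python convert_encodec_checkpoint_to_pytorch.py \
#         --model encodec_24khz \
#         --checkpoint_path encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec-24khz-hf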
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
        # make the random mask reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_2 - out_1))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible across the PT and TF models
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values vl, weights wt, capacity w, n items.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
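# Worked example: for values [60, 100, 120], weights [10, 20, 30] and capacity 50, the
# two best value/weight-ratio items fit whole (60 + 100), and 20/30 of the last item
# adds 120 * 20 / 30 = 80, for a total of 240.0 -- matching the doctest above.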
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        """
        :param claim_vector: total amount of each resource in the system
        :param allocated_resources_table: resources currently held by each process
        :param maximum_claim_table: maximum demand of each process
        """
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, column by column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available = claim vector minus what is currently allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Need = maximum claim minus current allocation, per process."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need row back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety algorithm, printing each executable process in turn."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align and print the data tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
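# Usage sketch: run the safety check on the sample tables defined above (any truthy
# keyword argument, e.g. describe=True, also triggers the pretty-printed tables):
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)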
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
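# Usage sketch (downloads the default checkpoint on first call; the input text is made up):
#
#     summarizer = TextSummarizationTool()
#     summary = summarizer("A long English text to compress into a few sentences ...")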
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch construction `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment with the four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid a stretched display of the graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
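# Each iteration replaces every segment with four, so the point count grows as
# 3 * 4**n + 1 for the initial triangle; iterate(INITIAL_VECTORS, 5) already
# produces 3 * 4**5 + 1 = 3073 points.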
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
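# Usage sketch:
#
#     tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     enc = tok("hello world")  # dict with input_ids and attention_mask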
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
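# Worked example of the rule above: a 480x640 (h x w) image with
# shortest_edge=18 resizes to height 18 and width int(18 * 640 / 480) = 24.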
@require_torch
@require_vision
class DeformableDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 3_9_7_6_9, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 8_2_2_8_7_3
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 25 | 0 |
def solution(length: int = 50 ) -> int:
    """Project Euler 116: count the ways to replace tiles in a row of `length`
    black tiles with coloured oblongs of length 2 (red), 3 (green) or 4 (blue),
    using one colour at a time."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
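# Worked check (the Project Euler 116 example): a row of length 5 admits
# 7 red, 3 green and 2 blue tilings, so solution(5) should equal 12.
assert solution(5) == 12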
if __name__ == "__main__":
print(F"""{solution() = }""") | 67 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
def valid_coloring(neighbours: list , colored_vertices: list , color: int ) -> bool:
    '''A vertex may take `color` only if no adjacent vertex already has it.'''
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph: list , max_colors: int , colored_vertices: list , index: int ) -> bool:
    '''Recursively try to color vertex `index` onwards, backtracking on failure.'''
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list , max_colors: int ) -> list:
    '''Return a valid coloring using at most `max_colors` colors, or [] if none exists.'''
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
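# Hypothetical usage of the helpers above: 2-color a 4-cycle given as an
# adjacency matrix; one valid assignment is [0, 1, 0, 1].
if __name__ == "__main__":
    cycle_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle_graph, 2))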
| 714 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( A ):
'''simple docstring'''
UpperCAmelCase__ =torch.load(A , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase__ =torch.load(A , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase__ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(A )
UpperCAmelCase__ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase__ =sd.pop(A )
UpperCAmelCase__ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase__ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase__ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase__ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase__ =value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` keeps the QKV weight separated as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =torch.split(A , depth // 3 , dim=0 )
UpperCAmelCase__ =q
UpperCAmelCase__ =k
UpperCAmelCase__ =v
del sd[key]
return sd
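# A minimal sketch of the split above (sizes illustrative, not from a real
# checkpoint): a fused QKV weight with 3 * d rows is cut into three d-row blocks.
#   fused = torch.randn(3 * 4, 8)
#   q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
#   assert q.shape == k.shape == v.shape == (4, 8)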
@torch.no_grad()
def _UpperCAmelCase ( A , A , A=None ):
'''simple docstring'''
UpperCAmelCase__ =load_checkpoint(A )
if config is not None:
UpperCAmelCase__ =OPTConfig.from_pretrained(A )
else:
UpperCAmelCase__ =OPTConfig()
UpperCAmelCase__ =OPTModel(A ).half().eval()
model.load_state_dict(A )
# Check results
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCamelCase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 510 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
    def __init__( self , parent , out_indices=None , out_features=None , stage_names=None , backbone="resnet50" , batch_size=3 , image_size=3_2 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def _A ( self : Optional[Any] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence( self ):
        timm_checkpoint = '''resnet18'''
        transformers_checkpoint = '''microsoft/resnet-18'''
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _A ( self : List[str] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _A ( self : Optional[int] ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _A ( self : int ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _A ( self : str ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _A ( self : Dict ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _A ( self : List[Any] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self : List[str] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _A ( self : Optional[int] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _A ( self : List[Any] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self : int ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self : Tuple ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _A ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _A ( self : Tuple ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _A ( self : Tuple ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _A ( self : int ):
pass
def _A ( self : Union[str, Any] ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(lowerCamelCase__ )
lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _A ( self : int ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Tuple = True
lowerCAmelCase : Any = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Dict = self.all_model_classes[0]
lowerCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
lowerCAmelCase : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase : int = model(**lowerCamelCase__ )
lowerCAmelCase : Optional[Any] = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _A ( self : Tuple ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : str = copy.deepcopy(lowerCamelCase__ )
lowerCAmelCase : int = None
lowerCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase : Any = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
lowerCAmelCase : Any = False
lowerCAmelCase : Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase : List[Any] = model(**lowerCamelCase__ )
| 348 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage(image: Image ) -> str:
    '''Short, stable fingerprint of an image, used to compare masks in tests.'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ) -> Dict:
    '''Summarise a mask as its hash and array shape.'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _A ( self : Optional[int] ):
pass
@slow
@require_torch
def _A ( self : Optional[int] ):
        image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_5_6 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def _A ( self : Any ):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation" , model=model_id )
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_5_6 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3},
] , )
| 348 | 1 |
def calc_profit(profit: list , weight: list , max_weight: int ):
    '''Greedy fractional-knapsack solver: repeatedly take the item with the best
    profit/weight ratio until max_weight is reached.'''
    if len(profit ) != len(weight ):
        raise ValueError('''The length of profit and weight must be same.''' )
    if max_weight <= 0:
        raise ValueError('''max_weight must be greater than zero.''' )
    if any(p < 0 for p in profit ):
        raise ValueError('''Profit can not be negative.''' )
    if any(w < 0 for w in weight ):
        raise ValueError('''Weight can not be negative.''' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # pick the greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1  # mark this item as used
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
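# Illustrative call (hypothetical values): with profits [10, 9, 8] and weights
# [5, 4, 3], every item fits under max_weight=15, so each is taken whole and
# calc_profit([10, 9, 8], [5, 4, 3], 15) returns 27.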
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 591 | def solution():
    '''Project Euler 40: build Champernowne's constant 0.123456789101112... and
    return the product of the digits d_1, d_10, d_100, ..., d_1000000.'''
    constant = []
    i = 1
    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[9_9999] )
        * int(constant[99_9999] )
    )
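# Running solution() evaluates that digit product; for Project Euler 40 the
# published answer is 210.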
if __name__ == "__main__":
print(solution())
| 591 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
lowerCAmelCase = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key: str , file: str ) -> str:
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers layout."""
    # Handle first and last layers
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return f"""h.{layer_number}.""" + key
def get_dtype_size(dtype ) -> float:
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
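# Quick sanity checks for the helper above (torch dtypes only):
#   get_dtype_size(torch.float16) -> 2
#   get_dtype_size(torch.float32) -> 4
#   get_dtype_size(torch.bool)    -> 0.125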
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ) ->str:
# Construct model
if bloom_config_file == "":
lowerCamelCase__ : List[str] =BloomConfig()
else:
lowerCamelCase__ : List[str] =BloomConfig.from_json_file(snake_case_ )
if shard_model:
lowerCamelCase__ : int =os.listdir(snake_case_ )
lowerCamelCase__ : List[str] =sorted(filter(lambda snake_case_ : s.startswith('layer' ) and "model_00" in s , snake_case_ ) )
lowerCamelCase__ : Any ={'weight_map': {}, 'metadata': {}}
lowerCamelCase__ : Any =0
lowerCamelCase__ : Optional[int] =None
lowerCamelCase__ : int =BloomConfig()
for j, file in enumerate(snake_case_ ):
print('Processing file: {}'.format(snake_case_ ) )
lowerCamelCase__ : Any =None
for i in range(snake_case_ ):
# load all TP files
lowerCamelCase__ : Tuple =file.replace('model_00' , f"""model_0{i}""" )
lowerCamelCase__ : str =torch.load(os.path.join(snake_case_ , snake_case_ ) , map_location='cpu' )
# Rename keys in the transformers names
lowerCamelCase__ : int =list(temp.keys() )
for key in keys:
lowerCamelCase__ : Optional[int] =temp.pop(snake_case_ )
if tensors is None:
lowerCamelCase__ : Tuple =temp
else:
for key in tensors.keys():
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCamelCase__ : Optional[int] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
lowerCamelCase__ : List[str] =torch.cat([tensors[key], temp[key]] , dim=snake_case_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCamelCase__ : List[Any] =tensors[key] / pretraining_tp
torch.save(
snake_case_ , os.path.join(
snake_case_ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(snake_case_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCamelCase__ : int =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCamelCase__ : List[str] ='pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(snake_case_ ) ).zfill(5 ) )
lowerCamelCase__ : Optional[Any] =BloomConfig()
lowerCamelCase__ : str =pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCamelCase__ : str =total_size
with open(snake_case_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(snake_case_ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
lowerCamelCase__ : Any =json.dumps(snake_case_ , indent=2 , sort_keys=snake_case_ ) + '\n'
f.write(snake_case_ )
else:
lowerCamelCase__ : List[Any] =BloomModel(snake_case_ )
lowerCamelCase__ : Optional[Any] =os.listdir(snake_case_ )
lowerCamelCase__ : List[Any] =sorted(filter(lambda snake_case_ : s.startswith('layer' ) and "model_00" in s , snake_case_ ) )
lowerCamelCase__ : Union[str, Any] =None
for i, file in enumerate(snake_case_ ):
lowerCamelCase__ : Dict =None
for i in range(snake_case_ ):
# load all TP files
lowerCamelCase__ : int =file.replace('model_00' , f"""model_0{i}""" )
lowerCamelCase__ : str =torch.load(os.path.join(snake_case_ , snake_case_ ) , map_location='cpu' )
# Rename keys in the transformers names
lowerCamelCase__ : Optional[int] =list(temp.keys() )
for key in keys:
lowerCamelCase__ : str =temp.pop(snake_case_ )
if tensors is None:
lowerCamelCase__ : Optional[int] =temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCamelCase__ : List[str] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
lowerCamelCase__ : str =torch.cat([tensors[key], temp[key]] , dim=snake_case_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCamelCase__ : List[Any] =tensors[key] / pretraining_tp
lowerCamelCase__ : List[Any] =model.load_state_dict(snake_case_ , strict=snake_case_ )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
lowerCamelCase__ : Dict =set(other_keys.missing_keys )
else:
lowerCamelCase__ : List[str] =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(snake_case_ , exist_ok=snake_case_ )
lowerCamelCase__ : Union[str, Any] =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase__ : str =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
lowerCamelCase__ : int =model.to(config.torch_dtype )
torch.save(model.state_dict() , snake_case_ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(snake_case_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
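# A recap of the two tensor-parallel merge rules applied above: weights whose
# names end with an entry of WEIGHTS_TO_AVERAGE_ENDSWITH are summed over the TP
# ranks and divided by `pretraining_tp`; all other weights are concatenated,
# along dim 1 when the name matches WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN and
# along dim 0 otherwise.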
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
lowerCAmelCase = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 174 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase__ : Tuple =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Any =self.dummy_uncond_unet
lowerCamelCase__ : Any =KarrasVeScheduler()
lowerCamelCase__ : List[str] =KarrasVePipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =torch.manual_seed(0 )
lowerCamelCase__ : Any =pipe(num_inference_steps=2 , generator=lowerCamelCase_ , output_type='numpy' ).images
lowerCamelCase__ : Optional[Any] =torch.manual_seed(0 )
lowerCamelCase__ : str =pipe(num_inference_steps=2 , generator=lowerCamelCase_ , output_type='numpy' , return_dict=lowerCamelCase_ )[0]
lowerCamelCase__ : int =image[0, -3:, -3:, -1]
lowerCamelCase__ : Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ : Dict =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : List[str] ='google/ncsnpp-celebahq-256'
lowerCamelCase__ : Dict =UNetaDModel.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : List[str] =KarrasVeScheduler()
lowerCamelCase__ : List[str] =KarrasVePipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : List[str] =torch.manual_seed(0 )
lowerCamelCase__ : Tuple =pipe(num_inference_steps=20 , generator=lowerCamelCase_ , output_type='numpy' ).images
lowerCamelCase__ : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase__ : Optional[Any] =np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 174 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    """Deprecated alias of PoolFormerImageProcessor."""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 707 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = 1.5
snake_case_ = int(factor * num_class_images )
snake_case_ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowercase__ , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowercase__ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
snake_case_ = client.query(text=lowercase__ )
if len(lowercase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
snake_case_ = int(factor * num_images )
snake_case_ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowercase__ , aesthetic_weight=0.1 , )
snake_case_ = 0
snake_case_ = 0
snake_case_ = tqdm(desc='downloading real regularization images' , total=lowercase__ )
with open(f"""{class_data_dir}/caption.txt""" , 'w' ) as fa, open(f"""{class_data_dir}/urls.txt""" , 'w' ) as fa, open(
f"""{class_data_dir}/images.txt""" , 'w' ) as fa:
while total < num_class_images:
snake_case_ = class_images[count]
count += 1
try:
snake_case_ = requests.get(images['url'] )
if img.status_code == 200:
snake_case_ = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def a():
'''simple docstring'''
snake_case_ = argparse.ArgumentParser('' , add_help=lowercase__ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=lowercase__ , type=lowercase__ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=lowercase__ , type=lowercase__ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=lowercase__ )
return parser.parse_args()
if __name__ == "__main__":
A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 46 | 0 |
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int] ) -> int:
    """Return a peak of a rising-then-falling list by divide and conquer, O(log n)."""
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
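# Illustrative call on a strictly rising-then-falling list:
#   peak([1, 3, 5, 9, 7, 4]) -> 9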
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel( metaclass=DummyObject ):
    """Placeholder that raises a helpful error when the `onnx` backend is missing."""
    _backends = ["onnx"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["onnx"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["onnx"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["onnx"] )
| 675 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
def snake_case ( self : List[str] ):
lowercase__ : Dict = load_tool("text-to-speech" )
self.tool.setup()
def snake_case ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = self.tool("hey" )
lowercase__ : Optional[int] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = self.tool("hey" )
lowercase__ : List[Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 701 |
import argparse
import json
from tqdm import tqdm
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
lowercase__ : List[str] = json.load(lowerCamelCase__ )
for dpr_record in tqdm(lowerCamelCase__ ):
lowercase__ : Any = dpr_record["question"]
lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowerCamelCase__ ) + "\n" )
if __name__ == "__main__":
main()
| 81 | 0 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
"""simple docstring"""
__A = ["""flax""", """transformers"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
"""simple docstring"""
__A = ["""flax""", """transformers"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
class SCREAMING_SNAKE_CASE_C ( metaclass=DummyObject ):
"""simple docstring"""
__A = ["""flax""", """transformers"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
class SCREAMING_SNAKE_CASE_D ( metaclass=DummyObject ):
"""simple docstring"""
__A = ["""flax""", """transformers"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['flax', 'transformers'] )
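# A self-contained sketch of the dummy-object pattern used above (names here
# are illustrative, not the library internals): both attribute access and
# instantiation raise an ImportError naming the missing backends, so a
# missing optional dependency only surfaces when the class is actually used.
class _DummyBackendMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")
class _ExampleFlaxPipeline(metaclass=_DummyBackendMeta):  # hypothetical stand-in class
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the backends {self._backends}")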
| 187 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = """tokenizer_file"""
    special_tokens_map = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
    def setUp( self ):
"""simple docstring"""
super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encode_decode( self ):
"""simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        target_tokens = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences )['input_ids']
        self.assertListEqual(target_tokens , computed_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(input_sentences , decoded_tokens )
    def test_padding( self , max_length=6 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
# Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_encodings_from_xnli_dataset( self ):
"""simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli' , 'all_languages' , split='test' , streaming=True )
        sample_data = next(iter(ds ) )['premise'] # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists( self ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 187 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
_lowercase = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
_lowercase = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil( images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images | 22 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory( args: Namespace ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser : ArgumentParser ) -> Any:
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' ,type=str ,help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' ,type=str ,help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self ,testing : bool ,testing_file : str ,path=None ,*args ) -> Union[str, Any]:
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file ,'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=True ,extra_context=testing_configuration ,)
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
        with open(directory + '/configuration.json' ,'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(F'{directory}/configuration.json' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir ,exist_ok=True )
        os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=True )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(path : str ):
            with open(path ,'r' ) as f:
                lines = f.readlines()
            with open(path ,'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file_path : str ,line_to_copy_below : str ,lines_to_copy : List[str] ):
            # Create temp file
            fd, abs_path = mkstemp()
            line_found = False
            with fdopen(fd ,'w' ) as new_file:
                with open(original_file_path ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file_path ,abs_path )
            # Remove original file
            remove(original_file_path )
            # Move new file
            move(abs_path ,original_file_path )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in ,line_to_copy_below ,lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
        replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory ) | 22 | 1 |
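# How the command above is wired into the CLI in practice; the flag names
# come from register_subcommand() above, and a non-testing run is interactive
# (cookiecutter prompts for the model details):
#
#   transformers-cli add-new-model
#   transformers-cli add-new-model --testing --testing_file configuration.json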
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols('''ct x y z''')
def beta( velocity: float ) -> float:
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def gamma( velocity: float ) -> float:
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix( velocity: float ) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def transform( velocity: float , event: np.ndarray = None ) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] ) # Symbolic four vector
    else:
        event[0] *= c # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""") | 228 |
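# A quick numeric sanity check of the helpers above: at v = 0.5c the Lorentz
# factor is 1 / sqrt(1 - 0.25) ~= 1.1547.
#
# >>> beta(0.5 * c)
# 0.5
# >>> round(gamma(0.5 * c), 10)
# 1.1547005384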
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image( image_size , device ):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
return image
def rename_key( key ):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*" , "vision_model.encoder" , key )
    if "blocks" in key:
        key = re.sub(R"blocks" , "layers" , key )
    if "attn" in key:
        key = re.sub(R"attn" , "self_attn" , key )
    if "norm1" in key:
        key = re.sub(R"norm1" , "layer_norm1" , key )
    if "norm2" in key:
        key = re.sub(R"norm2" , "layer_norm2" , key )
    if "encoder.norm" in key:
        key = re.sub(R"encoder.norm" , "post_layernorm" , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , key )
    if "encoder.cls_token" in key:
        key = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , key )
    if "self_attn" in key:
        key = re.sub(R"self_attn.proj" , "self_attn.projection" , key )
    return key
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit="base" )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device="cpu" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    input_ids = tokenizer(["a picture of"] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit="base" )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question , return_tensors="pt" ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit="base" )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question , return_tensors="pt" , padding="max_length" , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
lowerCamelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCamelCase : Tuple =parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path) | 228 | 1 |
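# A small illustration of the key-renaming scheme above on a made-up BLIP
# checkpoint key (the input key is hypothetical; the rewrite follows the
# rename_key steps above):
#
# >>> rename_key("visual_encoder.blocks.0.attn.proj.weight")
# 'vision_model.encoder.layers.0.self_attn.projection.weight'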
import random
def partition( a , left_index , right_index ):
    '''simple docstring'''
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
return i - 1
def quick_sort_random( a , left , right ):
    '''simple docstring'''
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        ) # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
def main():
    '''simple docstring'''
    user_input = input("""Enter numbers separated by a comma:\n""" ).strip()
    arr = [int(item ) for item in user_input.split(""",""" )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
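# A non-interactive usage sketch for the sorter above; random.seed is set
# only to make the pivot choices reproducible.
#
# >>> data = [5, 3, 8, 1, 9, 2]
# >>> random.seed(0)
# >>> quick_sort_random(data, 0, len(data))
# >>> data
# [1, 2, 3, 5, 8, 9]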
| 714 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters, singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
        conll = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def check_gold_parse_annotation( key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#""" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ) -> str:
        """simple docstring"""
        metrics = [
            ("""mentions""", evaluator.mentions),
            ("""muc""", evaluator.muc),
            ("""bcub""", evaluator.b_cubed),
            ("""ceafe""", evaluator.ceafe),
            ("""lea""", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 634 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
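# A short sketch of how the padding collators imported above are typically
# wired into a PyTorch DataLoader; `tokenizer` and `encoded_dataset` are
# assumed to exist and are not defined here.
#
# from torch.utils.data import DataLoader
# collator = DataCollatorWithPadding(tokenizer=tokenizer)
# loader = DataLoader(encoded_dataset, batch_size=8, collate_fn=collator)
# batch = next(iter(loader))  # "input_ids" / "attention_mask" padded to the longest item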
| 21 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig( PretrainedConfig ):
    model_type = '''longformer'''
    def __init__( self , attention_window = 512 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_0522 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 512 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1e-12 , onnx_export = False , **kwargs , ) -> str:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    def __init__( self , config , task = "default" , patching_specs = None ) -> List[str]:
        '''simple docstring'''
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
return outputs
@property
    def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1e-4
@property
    def default_onnx_opset( self ) -> int:
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs
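# The dummy-input trick above in isolation: a zeros mask with every second
# token marked global is what the exporter traces (shapes are illustrative).
#
# import torch
# input_ids = torch.ones(2, 8, dtype=torch.int64)
# global_attention_mask = torch.zeros_like(input_ids)
# global_attention_mask[:, ::2] = 1  # tokens 0, 2, 4, 6 attend globally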
| 178 | 0 |
'''simple docstring'''
import baseaa
def baseaa_encode( string: str ) -> bytes:
    return baseaa.baaencode(string.encode("utf-8"))
def baseaa_decode( encoded: bytes ) -> str:
    return baseaa.baadecode(encoded).decode("utf-8")
if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
print(decoded)
| 709 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter :
    '''simple docstring'''
    def __init__( self , order : int ) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs : list[float] , b_coeffs : list[float] ) -> None:
        '''simple docstring'''
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs )}'''
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs )}'''
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample : float ) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
return result
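# A usage sketch with hypothetical first-order coefficients (the a/b values
# below are illustrative, not designed for a particular cutoff frequency):
#
# fil = IIRFilter(1)
# fil.set_coefficients([1.0, -0.9], [0.05, 0.05])
# out = [fil.process(x) for x in (1.0, 1.0, 1.0, 1.0)]  # ramps toward the 1.0 steady state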
| 187 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput( BaseOutput ):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 3_2
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_2_8_0
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.floataa
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng ):
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.floataa )
        timesteps = jnp.ones((1,) , dtype=jnp.intaa )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
# input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
# down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
# mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(reversed_block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict = True , train = False , ):
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.floataa )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
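# A sketch of how init_weights above is typically driven; `unet` stands for
# an instance of the class above (which mirrors diffusers'
# FlaxUNet2DConditionModel):
#
# import jax
# rng = jax.random.PRNGKey(0)
# params = unet.init_weights(rng)  # dummy sample/timesteps/hidden states -> initialized params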
| 530 | """simple docstring"""
def catalan_numbers( upper_limit: int ) -> "list[int]":
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
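# A quick spot check of the DP above against the known Catalan sequence
# 1, 1, 2, 5, 14, 42:
#
# >>> catalan_numbers(5)
# [1, 1, 2, 5, 14, 42]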
| 530 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def fetch_jobs( location: str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
        job_title = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
        company_name = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''') | 61 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
"""simple docstring"""
# For xla_spawn (TPUs)
main()
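
# Example invocation (an added sketch; the flags correspond to the dataclasses above and
# to `TrainingArguments` — adjust the paths for your own CoNLL-formatted data):
#
#   python run_ner.py \
#       --model_name_or_path bert-base-cased \
#       --data_dir /path/to/conll2003 \
#       --labels /path/to/labels.txt \
#       --output_dir /tmp/ner-output \
#       --max_seq_length 128 \
#       --do_train --do_eval --do_predict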
if __name__ == "__main__":
main() | 61 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SCREAMING_SNAKE_CASE__ : str = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
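
# A minimal usage sketch (added; assumes network access and the `tokenizers`/`sentencepiece`
# extras are installed). "albert-base-v2" is one of the checkpoints mapped above.
if __name__ == "__main__":
    tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    # Prints token ids wrapped with the [CLS]/[SEP] special-token ids.
    print(tok("Hello world")["input_ids"])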
| 79 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                ) | 237 | 0 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 715 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ) -> None:
"""simple docstring"""
snake_case__ : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
snake_case__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
snake_case__ : int = vocab_file
snake_case__ : Optional[Any] = monolingual_vocab_file
snake_case__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case__ : Dict = {}
snake_case__ : Union[str, Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : List[str] = cnt
cnt += 1
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
snake_case__ : Optional[int] = line.strip().split()[0]
snake_case__ : List[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case__ : Any = len(self.fairseq_tokens_to_ids )
snake_case__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
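
# Layout note (added): like BART/RoBERTa, BartphoTokenizer builds a single sequence as
# `<s> A </s>` and a pair as `<s> A </s></s> B </s>`; that is exactly what
# `build_inputs_with_special_tokens` above returns.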
| 694 | 0 |
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Any = input("""Enter numbers separated by a comma : """).strip()
_lowerCAmelCase : Union[str, Any] = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
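    # Added sanity check (a sketch, not in the original file): introsort should agree
    # with Python's built-in sorted() on a random sample.
    import random

    sample = [random.randint(-1000, 1000) for _ in range(500)]
    assert sort(list(sample)) == sorted(sample)
    print("introsort matches sorted() on a random sample")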
| 438 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
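# Note (added): this module uses the standard `transformers` lazy-import pattern — names are
# declared in `_import_structure` and only resolved on first access through `_LazyModule`,
# so importing the package does not eagerly import torch/TF/Flax.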
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 380 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
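
# A minimal instantiation sketch (added): construct the config with defaults and read the
# two mapped properties. No pretrained weights are involved here.
if __name__ == "__main__":
    config = TableTransformerConfig()
    print(config.hidden_size, config.num_attention_heads)  # 256 8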
| 702 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253, "e": 0.11162,
            "f": 0.02228, "g": 0.02015, "h": 0.06094, "i": 0.07546, "j": 0.00153,
            "k": 0.01292, "l": 0.04025, "m": 0.02406, "n": 0.06749, "o": 0.07507,
            "p": 0.01929, "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150, "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
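

# A quick demo (an added sketch, not part of the original module): encrypt a known
# English sentence with a Caesar shift of 4, then let the chi-squared heuristic
# recover the shift and the plaintext.
if __name__ == "__main__":
    plaintext = "defend the east wall of the castle"
    encrypted = "".join(
        chr((ord(c) - 97 + 4) % 26 + 97) if c.isalpha() else c for c in plaintext
    )
    shift, chi2, decoded = decrypt_caesar_with_chi_squared(encrypted)
    # For typical English text this should print the shift 4 and the original sentence.
    print(shift, decoded)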
| 454 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
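
# Illustrative usage (added; not in the original file): nested dictionaries
# become attribute-accessible Config objects, e.g.
#     cfg = Config({"model": {"hidden_size": 768}})
#     cfg.model.hidden_size  # -> 768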
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
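
# Illustrative note (added; not in the original file): model ids without a "/"
# use the legacy flat layout, e.g.
#     hf_bucket_url("bert-base-uncased", "config.yaml")
#     # -> "https://cdn.huggingface.co/bert-base-uncased-config.yaml"
# while namespaced ids resolve to "<endpoint>/<model_id>/<filename>".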
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s"
                % (url, temp_file.name)
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def a__ ( A__, A__="," ):
assert isinstance(A__, A__ )
if os.path.isfile(A__ ):
with open(A__ ) as f:
SCREAMING_SNAKE_CASE_ : Dict = eval(f.read() )
else:
SCREAMING_SNAKE_CASE_ : Tuple = requests.get(A__ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = requests.json()
except Exception:
SCREAMING_SNAKE_CASE_ : Optional[int] = req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE_ : Dict = eval(A__ )
except Exception:
SCREAMING_SNAKE_CASE_ : Any = data.split('\n' )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
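
# Illustrative note (added; not in the original file): `chunk` yields successive
# batch-sized slices, e.g.
#     list(chunk([1, 2, 3, 4], batch=2))  # -> [[1, 2], [3, 4]]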
| 101 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 24 | 0 |
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy to avoid aliasing: current_row is overwritten on the next pass.
        next_row = current_row.copy()
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
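    # Illustrative cross-check (added for exposition; not in the original
    # snippet): all four implementations above agree on the side length of
    # the largest all-ones square.
    example = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down(3, 3, example)
        == largest_square_area_in_matrix_top_down_with_dp(3, 3, example)
        == largest_square_area_in_matrix_bottom_up(3, 3, example)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, example)
        == 2
    )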
| 717 |
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
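
# Illustrative examples (added; not in the original snippet):
#     maximum_non_adjacent_sum([1, 2, 3])              # -> 4  (pick 1 and 3)
#     maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])  # -> 18 (pick 5, 7 and 6)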
if __name__ == "__main__":
import doctest
doctest.testmod() | 649 | 0 |
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The first column is special because of the model names.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table


def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
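
# Usage note (added for exposition), per the comment at the top of this script:
#     python utils/check_table.py                      # check the table only
#     python utils/check_table.py --fix_and_overwrite  # rewrite docs/source/en/index.md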
| 606 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
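
# Illustrative examples (added; not in the original script) of the key renaming
# performed by `rename_key` above:
#     rename_key("stem.conv.weight")  # -> "bit.embedder.convolution.weight"
#     rename_key("head.fc.weight")    # -> "classifier.1.weight"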
| 130 | 0 |
'''simple docstring'''
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
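
# Illustrative check (added; not in the original snippet): with the default 18
# terms the truncated Taylor series matches math.sin closely, e.g.
#     sin(90)  # -> 1.0
#     sin(30)  # -> 0.5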
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 163 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
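
# Illustrative example (added; not in the original script) of the key mapping
# performed by `rename_keys` above:
#     rename_keys("transformer.layers.0.linear1.weight")
#     # -> "model.decoder.layers.0.fc1.weight"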
| 163 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 505 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_A = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_A = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_A = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
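
# Illustrative example (added; not in the original file), mirroring the doctest
# in _KWARGS_DESCRIPTION above:
#     import numpy as np
#     acc_and_f1(np.array([0, 1]), np.array([0, 1]))
#     # -> {"accuracy": 1.0, "f1": 1.0}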
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
                "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]"
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
                "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]"
            ) | 505 | 1 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 233 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
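
# Illustrative note (added; not in the original script): freezing is just
# flipping requires_grad on the relevant embedding weight, e.g. for any
# torch.nn.Embedding `emb`:
#     emb.weight.requires_grad = False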
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE_ :Dict = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=SCREAMING_SNAKE_CASE , choices=['distilbert', 'roberta', 'gpt2'] , required=SCREAMING_SNAKE_CASE , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=SCREAMING_SNAKE_CASE , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=SCREAMING_SNAKE_CASE , help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=SCREAMING_SNAKE_CASE , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=SCREAMING_SNAKE_CASE , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.1_5 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=SCREAMING_SNAKE_CASE , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=SCREAMING_SNAKE_CASE , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=SCREAMING_SNAKE_CASE , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=SCREAMING_SNAKE_CASE , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=SCREAMING_SNAKE_CASE , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.0_5 , type=SCREAMING_SNAKE_CASE , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=SCREAMING_SNAKE_CASE , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5E-4 , type=SCREAMING_SNAKE_CASE , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1E-6 , type=SCREAMING_SNAKE_CASE , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=SCREAMING_SNAKE_CASE , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.0_2 , type=SCREAMING_SNAKE_CASE , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=SCREAMING_SNAKE_CASE , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=SCREAMING_SNAKE_CASE , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=SCREAMING_SNAKE_CASE , default=500 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=SCREAMING_SNAKE_CASE , default=4000 , help='Checkpoint interval.' )
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
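# A minimal standalone sketch (separate from the script above) of the XLM-style
# rare-token smoothing that `--mlm_smoothing` controls: token counts are raised
# to a negative power, so rarer tokens get proportionally larger masking weight.
# The counts below are toy values.
import numpy as np

counts = np.array([10_000, 1_000, 10, 1])
smoothing = 0.7  # the --mlm_smoothing default
weights = np.maximum(counts, 1) ** -smoothing
probs = weights / weights.sum()
print(probs.round(3))  # the rarest token receives by far the largest mass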
| 233 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
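# A standalone sketch of the pattern these tests exercise (not the actual
# transformers implementation): a processor routes text to its tokenizer and
# images to its image processor, then merges the two result dicts. All names
# below are hypothetical.
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding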
| 253 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Validate a given credit card number with the Luhn algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling a digit results in a two-digit number, i.e. greater
        # than 9 (e.g., 6 x 2 = 12), then add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 16: 1 + 6 = 7) to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate the given credit card number and print the reason if it is invalid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
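# A quick standalone check, using only the code above, that the
# `digit %= 10; digit += 1` reduction in `luhn_validation` matches the comment:
# for any doubled digit it equals the sum of the digits of the product.
for d in range(10):
    doubled = d * 2
    reduced = doubled if doubled <= 9 else doubled % 10 + 1
    assert reduced == sum(int(c) for c in str(doubled))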
| 253 | 1 |
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row numpy array into a column numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the desired dimension is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
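# A short standalone usage sketch of `principal_component_analysis` above,
# reusing the 3-features-by-5-samples toy matrix from the LDA test.
features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]], dtype=float)
projected = principal_component_analysis(features, dimensions=2)
print(projected.shape)  # (2, 5): five samples projected onto two components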
| 658 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    # Plain recursion: count ordered ways to pick items from `array` summing to `target`.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    # Top-down recursion with memoization in `dp_array`.
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    # Bottom-up dynamic programming over all sub-targets from 1 to `target`.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
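# Standalone sanity check that the three implementations above agree. For
# array=[1, 2, 5] and target=5 there are 9 ordered combinations, since
# f(t) = f(t-1) + f(t-2) + f(t-5) with f(0) = 1 gives f(5) = 9.
assert (
    combination_sum_iv(3, [1, 2, 5], 5)
    == combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    == combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    == 9
)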
| 658 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[Any]=13 , __snake_case : int=10 , __snake_case : Optional[Any]=3 , __snake_case : int=2 , __snake_case : List[Any]=2 , __snake_case : str=True , __snake_case : Tuple=True , __snake_case : Union[str, Any]=32 , __snake_case : Any=5 , __snake_case : Any=4 , __snake_case : Optional[Any]=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : Any=0.1 , __snake_case : Tuple=10 , __snake_case : str=0.02 , __snake_case : Any="divided_space_time" , __snake_case : Union[str, Any]=None , ) -> Dict:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = num_channels
lowerCamelCase = patch_size
lowerCamelCase = num_frames
lowerCamelCase = is_training
lowerCamelCase = use_labels
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = attention_type
lowerCamelCase = initializer_range
lowerCamelCase = scope
lowerCamelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowerCamelCase = (image_size // patch_size) ** 2
lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowerCamelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowerCamelCase = self.num_labels
return config
def lowerCamelCase__ ( self : int , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = TimesformerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Any ) -> Tuple:
'''simple docstring'''
lowerCamelCase = TimesformerForVideoClassification(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase = model(__snake_case )
# verify the logits shape
lowerCamelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __snake_case )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
snake_case = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = TimesformerModelTester(self )
lowerCamelCase = ConfigTester(
self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Dict , __snake_case : str , __snake_case : Tuple , __snake_case : Dict=False ) -> str:
'''simple docstring'''
lowerCamelCase = copy.deepcopy(__snake_case )
if return_labels:
if model_class in get_values(__snake_case ):
lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(__snake_case )
lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase = [*signature.parameters.keys()]
lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__snake_case )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TimesformerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = True
for model_class in self.all_model_classes:
lowerCamelCase = self.model_tester.seq_length
lowerCamelCase = self.model_tester.num_frames
lowerCamelCase = True
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase = True
lowerCamelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowerCamelCase = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
lowerCamelCase = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
def check_hidden_states_output(__snake_case : int , __snake_case : List[Any] , __snake_case : List[str] ):
lowerCamelCase = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase = outputs.hidden_states
lowerCamelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__snake_case ) , __snake_case )
lowerCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def a_ ( ) -> Any:
"""simple docstring"""
lowerCamelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowerCamelCase = np.load(UpperCamelCase_ )
return list(UpperCamelCase_ )
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
lowerCamelCase = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
__snake_case )
lowerCamelCase = self.default_image_processor
lowerCamelCase = prepare_video()
lowerCamelCase = image_processor(video[:8] , return_tensors='pt' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase = model(**__snake_case )
# verify the logits
lowerCamelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
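# A standalone arithmetic check of the token-count comment in the model tester
# above, using its default sizes (image_size=10, patch_size=2, num_frames=2).
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 5 * 5 = 25 patches per frame
seq_length = num_frames * num_patches_per_frame + 1  # 2 * 25 + 1 = 51 tokens incl. CLS
print(num_patches_per_frame, seq_length)  # 25 51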
| 246 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
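# A minimal standalone sketch of the label-masking idiom used in
# `_map_to_encoder_decoder_inputs` above: pad positions are replaced by -100,
# the index PyTorch's cross-entropy loss ignores. The pad id is hypothetical.
pad_token_id = 0
labels = [[5, 9, pad_token_id, pad_token_id], [7, pad_token_id, pad_token_id, pad_token_id]]
masked = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
print(masked)  # [[5, 9, -100, -100], [7, -100, -100, -100]]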
| 246 | 1 |
"""simple docstring"""
import re
def lowerCAmelCase__ ( lowerCamelCase__ ) -> bool:
A = re.compile(
R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
return bool(re.search(lowerCamelCase__ , lowerCamelCase__ ) )
if __name__ == "__main__":
A = '0094702343221'
print(is_sri_lankan_phone_number(phone))
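# Quick standalone checks of the validator above: the prefix may be 0, 94, +94
# or 0094, the next two digits must be 70-72 or 74-78, followed by seven digits.
assert is_sri_lankan_phone_number("0770123456")  # local 07x format
assert is_sri_lankan_phone_number("+94771234567")  # international format
assert not is_sri_lankan_phone_number("0730123456")  # 73 is not a valid prefix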
| 109 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCAmelCase__ ( ) -> Optional[Any]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
A = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCAmelCase__ ( ) -> str:
assert _test_patching.open is open
A = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , lowerCamelCase__ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCAmelCase__ ( ) -> List[Any]:
# pandas.read_csv is not present in _test_patching
A = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , lowerCamelCase__ ):
pass
def lowerCAmelCase__ ( ) -> Union[str, Any]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
A = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , lowerCamelCase__ ) is None
with patch_submodule(_test_patching , 'len' , lowerCamelCase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCAmelCase__ ( ) -> Union[str, Any]:
A = '__test_patch_submodule_start_and_stop_mock__'
A = patch_submodule(_test_patching , 'open' , lowerCamelCase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCAmelCase__ ( ) -> int:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
A = '__test_patch_submodule_successive_join__'
A = '__test_patch_submodule_successive_dirname__'
A = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.rename' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCAmelCase__ ( ) -> Optional[Any]:
A = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , lowerCamelCase__ ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , lowerCamelCase__ ):
pass
| 109 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 143 |
"""simple docstring"""
class _lowerCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> int:
_SCREAMING_SNAKE_CASE : Optional[int] = data
_SCREAMING_SNAKE_CASE : Tuple = previous
_SCREAMING_SNAKE_CASE : Any = next_node
def __str__( self ) -> str:
return F"""{self.data}"""
def A ( self ) -> int:
return self.data
def A ( self ) -> Dict:
return self.next
def A ( self ) -> Union[str, Any]:
return self.previous
class _lowerCAmelCase :
def __init__( self , lowerCAmelCase_ ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[Any] = head
def __iter__( self ) -> Any:
return self
def A ( self ) -> Dict:
if not self.current:
raise StopIteration
else:
_SCREAMING_SNAKE_CASE : Any = self.current.get_data()
_SCREAMING_SNAKE_CASE : Dict = self.current.get_next()
return value
class _lowerCAmelCase :
def __init__( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Dict = None # First node in list
_SCREAMING_SNAKE_CASE : Union[str, Any] = None # Last node in list
def __str__( self ) -> Any:
_SCREAMING_SNAKE_CASE : str = self.head
_SCREAMING_SNAKE_CASE : int = []
while current is not None:
nodes.append(current.get_data() )
_SCREAMING_SNAKE_CASE : List[Any] = current.get_next()
return " ".join(str(lowerCAmelCase_ ) for node in nodes )
def __contains__( self , lowerCAmelCase_ ) -> Any:
_SCREAMING_SNAKE_CASE : Any = self.head
while current:
if current.get_data() == value:
return True
_SCREAMING_SNAKE_CASE : int = current.get_next()
return False
def __iter__( self ) -> str:
return LinkedListIterator(self.head )
def A ( self ) -> Dict:
if self.head:
return self.head.get_data()
return None
def A ( self ) -> List[Any]:
if self.tail:
return self.tail.get_data()
return None
def A ( self , lowerCAmelCase_ ) -> None:
if self.head is None:
_SCREAMING_SNAKE_CASE : List[Any] = node
_SCREAMING_SNAKE_CASE : Dict = node
else:
self.insert_before_node(self.head , lowerCAmelCase_ )
def A ( self , lowerCAmelCase_ ) -> None:
if self.head is None:
self.set_head(lowerCAmelCase_ )
else:
self.insert_after_node(self.tail , lowerCAmelCase_ )
def A ( self , lowerCAmelCase_ ) -> None:
_SCREAMING_SNAKE_CASE : Any = Node(lowerCAmelCase_ )
if self.head is None:
self.set_head(lowerCAmelCase_ )
else:
self.set_tail(lowerCAmelCase_ )
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_SCREAMING_SNAKE_CASE : Optional[Any] = node
_SCREAMING_SNAKE_CASE : List[Any] = node.previous
if node.get_previous() is None:
_SCREAMING_SNAKE_CASE : List[Any] = node_to_insert
else:
_SCREAMING_SNAKE_CASE : Optional[int] = node_to_insert
_SCREAMING_SNAKE_CASE : Any = node_to_insert
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_SCREAMING_SNAKE_CASE : Optional[Any] = node
_SCREAMING_SNAKE_CASE : Optional[int] = node.next
if node.get_next() is None:
_SCREAMING_SNAKE_CASE : Tuple = node_to_insert
else:
_SCREAMING_SNAKE_CASE : Any = node_to_insert
_SCREAMING_SNAKE_CASE : List[str] = node_to_insert
def A ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
_SCREAMING_SNAKE_CASE : List[Any] = 1
_SCREAMING_SNAKE_CASE : str = Node(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCAmelCase_ , lowerCAmelCase_ )
return
current_position += 1
_SCREAMING_SNAKE_CASE : List[str] = node.next
self.insert_after_node(self.tail , lowerCAmelCase_ )
def A ( self , lowerCAmelCase_ ) -> Node:
_SCREAMING_SNAKE_CASE : Tuple = self.head
while node:
if node.get_data() == item:
return node
_SCREAMING_SNAKE_CASE : Dict = node.get_next()
raise Exception('Node not found' )
def A ( self , lowerCAmelCase_ ) -> int:
if (node := self.get_node(lowerCAmelCase_ )) is not None:
if node == self.head:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.head.get_next()
if node == self.tail:
_SCREAMING_SNAKE_CASE : int = self.tail.get_previous()
self.remove_node_pointers(lowerCAmelCase_ )
@staticmethod
def A ( lowerCAmelCase_ ) -> None:
if node.get_next():
_SCREAMING_SNAKE_CASE : List[Any] = node.previous
if node.get_previous():
_SCREAMING_SNAKE_CASE : str = node.next
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : List[Any] = None
def A ( self ) -> List[str]:
return self.head is None
def lowercase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
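# A short standalone usage sketch of the LinkedList above.
linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)  # 1 becomes the head; 2 and 3 are appended at the tail
linked_list.insert_at_position(2, 9)  # insert 9 before the node at position 2
print(linked_list)  # 1 9 2 3
print(2 in linked_list)  # True
linked_list.delete_value(9)
print(linked_list)  # 1 2 3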
| 621 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 712 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (_lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased'
def UpperCamelCase ( self ) -> Any:
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# With lower casing
_SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=A__ )
_SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A__ )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(A__ ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase ( self ) -> str:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase ( self ) -> Dict:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
_SCREAMING_SNAKE_CASE = """""".join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(A__ , **A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(A__ , add_special_tokens=A__ )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(A__ )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
| 0 | 0 |
from collections.abc import Sequence
def _UpperCamelCase ( lowerCAmelCase_ = None ) ->int:
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
UpperCAmelCase = nums[0]
for i in range(1 , len(lowerCAmelCase_ ) ):
UpperCAmelCase = nums[i]
UpperCAmelCase = max(lowerCAmelCase_ , ans + num , lowerCAmelCase_ )
return ans
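# Contrast sketch (illustrative): the contiguous-subarray variant --
# Kadane's algorithm -- keeps a running sum that must end at the current
# element, which the subsequence version above does not need.
def max_subarray_sum(nums: "Sequence[int]") -> int:
    best = cur = nums[0]
    for num in nums[1:]:
        cur = max(num, cur + num)  # best sum of a subarray ending here
        best = max(best, cur)
    return best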
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__a = int(input("""Enter number of elements : """).strip())
__a = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 377 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
def __init__( self : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Dict=1_0 , __lowerCamelCase : Optional[int]=[1_0, 2_0, 3_0, 4_0] , __lowerCamelCase : Optional[int]=[1, 1, 2, 1] , __lowerCamelCase : int=True , __lowerCamelCase : str=True , __lowerCamelCase : str="relu" , __lowerCamelCase : str=3 , __lowerCamelCase : int=None , ) -> Dict:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(__lowerCamelCase )
def _lowercase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _lowercase ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase = TFResNetModel(config=__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _lowercase ( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFResNetForImageClassification(__lowerCamelCase )
UpperCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
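# ResNet reduces spatial resolution by 32x overall (4x in the stem, then
# three stride-2 stages), which is why the shape check above asserts a
# last_hidden_state of size (B, C, H // 32, W // 32).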
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = TFResNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowercase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ):
UpperCAmelCase = model_class(__lowerCamelCase )
UpperCAmelCase = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase = layer_type
UpperCAmelCase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowercase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowercase ( self : Tuple ) -> List[str]:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFResNetModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) ->int:
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__lowerCamelCase , return_tensors="""tf""" )
# forward pass
UpperCAmelCase = model(**__lowerCamelCase )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCAmelCase = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1e-4 ) )
| 377 | 1 |
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
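# Quick demonstration (illustrative): non-negative integers only, since
# Python's % would bin negative values into the wrong bucket.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]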
if __name__ == "__main__":
import doctest
doctest.testmod()
| 466 |
'''simple docstring'''
def UpperCamelCase__ ( _lowercase : int ) -> int:
if not isinstance(_lowercase , _lowercase ):
__UpperCAmelCase: List[str] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowercase )
if number < 1:
__UpperCAmelCase: Dict = F'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowercase )
__UpperCAmelCase: int = 1
for i in range(1 , _lowercase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
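# Illustrative check: with this 1-based loop, catalan(n) yields the
# sequence 1, 1, 2, 5, 14, ... for n = 1, 2, 3, 4, 5.
assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]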
if __name__ == "__main__":
import doctest
doctest.testmod()
| 466 | 1 |
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """simple docstring"""
    pat = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
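# Illustrative examples of the pattern above (10 digits starting with
# 7, 8 or 9, optionally prefixed with +91 or 91):
assert indian_phone_validator("+918827897895")
assert not indian_phone_validator("123456789")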
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 617 |
"""simple docstring"""
from math import pow, sqrt
# Graham's law of effusion: effusion_rate_1 / effusion_rate_2
#   = sqrt(molar_mass_2 / molar_mass_1)
def validate(*values: float) -> bool:
    """simple docstring"""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    """simple docstring"""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("""Input Error: Molar mass values must be greater than 0.""")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    """simple docstring"""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    """simple docstring"""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    """simple docstring"""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    """simple docstring"""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""")
    )
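# Illustrative usage (values are approximate; subscripts follow the
# convention in the comment above): hydrogen effuses roughly 3.98x
# faster than oxygen.
print(effusion_ratio(2.016, 31.998))  # ~3.983971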
| 617 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : str = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : Tuple = importlib.util.spec_from_file_location(
"""transformers""",
os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__lowerCamelCase : List[str] = spec.loader.load_module()
__lowerCamelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Dict = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
__lowerCamelCase : Optional[int] = {
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
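# Illustrative behaviour of the checkpoint regex above:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]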
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Union[str, Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case__ : int = False
# source code of `config_class`
snake_case__ : Union[str, Any] = inspect.getsource(snake_case_ )
snake_case__ : int = _re_checkpoint.findall(snake_case_ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case__, snake_case__ : List[Any] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case__ : List[str] = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case__ : List[Any] = True
break
snake_case__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(snake_case_ )
if len(snake_case_ ) > 0:
snake_case__ : Tuple = "\n".join(sorted(snake_case_ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
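# Example of the shortest-edge rule above: with shortest_edge=18, a
# 400 (h) x 600 (w) image resizes to (18, 27) -- the short side is pinned
# to 18 and the long side scales by the same ratio, truncated via int().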
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Any = logging.get_logger(__name__)
def __A ( a_ : Optional[int] , a_ : Any=False )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('''head''' ):
SCREAMING_SNAKE_CASE : Dict = 'segformer.encoder.' + key
if key.startswith('''backbone''' ):
SCREAMING_SNAKE_CASE : str = key.replace('''backbone''' , '''segformer.encoder''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
SCREAMING_SNAKE_CASE : Optional[int] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
SCREAMING_SNAKE_CASE : List[Any] = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(_A )-1}" )
if "norm" in key:
SCREAMING_SNAKE_CASE : List[Any] = key.replace('''norm''' , '''layer_norm''' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
SCREAMING_SNAKE_CASE : Tuple = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
SCREAMING_SNAKE_CASE : int = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(_A )-1}" )
if "layer_norm1" in key:
SCREAMING_SNAKE_CASE : Tuple = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
SCREAMING_SNAKE_CASE : int = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
SCREAMING_SNAKE_CASE : Union[str, Any] = key[key.find('''block''' ) + len('''block''' )]
SCREAMING_SNAKE_CASE : Tuple = key.replace(F"block{idx}" , F"block.{int(_A )-1}" )
if "attn.q" in key:
SCREAMING_SNAKE_CASE : str = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
SCREAMING_SNAKE_CASE : int = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
SCREAMING_SNAKE_CASE : List[Any] = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
SCREAMING_SNAKE_CASE : str = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
SCREAMING_SNAKE_CASE : Tuple = key[key.find('''linear_c''' ) + len('''linear_c''' )]
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(F"linear_c{idx}" , F"linear_c.{int(_A )-1}" )
if key.startswith('''head''' ):
SCREAMING_SNAKE_CASE : str = key.replace('''head''' , '''classifier''' )
SCREAMING_SNAKE_CASE : Any = value
return new_state_dict
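# Illustrative trace of the renaming rules above, for one hypothetical key:
#   "backbone.patch_embed1.proj.weight"
#   -> "segformer.encoder.patch_embed1.proj.weight"        (backbone prefix)
#   -> "segformer.encoder.patch_embeddings.0.proj.weight"  (patch_embed1 -> patch_embeddings.0)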
def __A ( a_ : Dict , a_ : Tuple )-> List[str]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : Any = kv_weight[
: config.hidden_sizes[i], :
]
SCREAMING_SNAKE_CASE : Dict = kv_bias[: config.hidden_sizes[i]]
SCREAMING_SNAKE_CASE : str = kv_weight[
config.hidden_sizes[i] :, :
]
SCREAMING_SNAKE_CASE : List[str] = kv_bias[
config.hidden_sizes[i] :
]
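# The fused key/value projection is a Linear(hidden, 2 * hidden), so its
# weight has shape (2 * hidden_size, hidden_size): the slices above peel
# off the key rows first and the value rows second.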
def __A ( )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE : Optional[int] = Image.open(requests.get(_A , stream=_A ).raw )
return image
@torch.no_grad()
def __A ( a_ : int , a_ : str , a_ : Tuple )-> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = SegformerConfig()
SCREAMING_SNAKE_CASE : Any = False
# set attributes based on model_name
SCREAMING_SNAKE_CASE : Dict = 'huggingface/label-files'
if "segformer" in model_name:
SCREAMING_SNAKE_CASE : Dict = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
SCREAMING_SNAKE_CASE : List[Any] = 1_50
SCREAMING_SNAKE_CASE : Any = 'ade20k-id2label.json'
SCREAMING_SNAKE_CASE : Dict = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
SCREAMING_SNAKE_CASE : Any = 19
SCREAMING_SNAKE_CASE : Any = 'cityscapes-id2label.json'
SCREAMING_SNAKE_CASE : Union[str, Any] = (1, 19, 1_28, 1_28)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Dict = model_name[4:6]
SCREAMING_SNAKE_CASE : Tuple = 10_00
SCREAMING_SNAKE_CASE : int = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE : Any = (1, 10_00)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
SCREAMING_SNAKE_CASE : str = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : List[str] = {int(_A ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Tuple = idalabel
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
SCREAMING_SNAKE_CASE : Dict = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE : str = 2_56
elif size == "b2":
SCREAMING_SNAKE_CASE : str = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE : int = 7_68
SCREAMING_SNAKE_CASE : str = [3, 4, 6, 3]
elif size == "b3":
SCREAMING_SNAKE_CASE : Dict = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE : int = 7_68
SCREAMING_SNAKE_CASE : List[str] = [3, 4, 18, 3]
elif size == "b4":
SCREAMING_SNAKE_CASE : str = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE : Optional[int] = 7_68
SCREAMING_SNAKE_CASE : List[Any] = [3, 8, 27, 3]
elif size == "b5":
SCREAMING_SNAKE_CASE : int = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
SCREAMING_SNAKE_CASE : List[str] = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
SCREAMING_SNAKE_CASE : Any = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=_A , align=_A , do_random_crop=_A )
# prepare image
SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=_A , return_tensors='''pt''' ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
SCREAMING_SNAKE_CASE : Dict = torch.load(_A , map_location=torch.device('''cpu''' ) )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(_A , map_location=torch.device('''cpu''' ) )['state_dict']
# rename keys
SCREAMING_SNAKE_CASE : int = rename_keys(_A , encoder_only=_A )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_A , _A )
# create HuggingFace model and load state dict
if encoder_only:
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[Any] = SegformerForImageClassification(_A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = SegformerForSemanticSegmentation(_A )
model.load_state_dict(_A )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE : List[str] = model(_A )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
SCREAMING_SNAKE_CASE : Optional[int] = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _A , atol=1E-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
lowerCamelCase__ : Any = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 698 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class a__( lowerCamelCase__ ):
def __init__( self : int , __snake_case : Callable , __snake_case : Optional[Features] = None , __snake_case : str = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[dict] = None , __snake_case : Optional[int] = None , **__snake_case : Optional[int] , ):
super().__init__(
features=__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case , streaming=__snake_case , num_proc=__snake_case , **__snake_case , )
a : List[Any] = Generator(
cache_dir=__snake_case , features=__snake_case , generator=__snake_case , gen_kwargs=__snake_case , **__snake_case , )
def lowercase_ ( self : Any ):
# Build iterable dataset
if self.streaming:
a : Optional[int] = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
a : List[Any] = None
a : Any = None
a : Optional[int] = None
a : List[str] = None
self.builder.download_and_prepare(
download_config=__snake_case , download_mode=__snake_case , verification_mode=__snake_case , base_path=__snake_case , num_proc=self.num_proc , )
a : Dict = self.builder.as_dataset(
split='train' , verification_mode=__snake_case , in_memory=self.keep_in_memory )
return dataset
| 526 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=18 , UpperCamelCase__=30 , UpperCamelCase__=400 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , ) -> Dict:
'''simple docstring'''
A_ = size if size is not None else {"""shortest_edge""": 20}
A_ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = do_center_crop
A_ = crop_size
A_ = do_flip_channel_order
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
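# do_flip_channel_order swaps RGB to BGR, the channel order the original
# MobileViT checkpoints expect; the tests below only exercise the resize
# and center-crop output shapes.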
@require_torch
@require_vision
class A__ ( _snake_case , unittest.TestCase ):
lowercase = MobileViTImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = MobileViTImageProcessingTester(self )
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_center_crop""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """center_crop""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_flip_channel_order""" ) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 712 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
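# Note: unlike binary search this does not require sorted input; each call
# inspects one element from each end, so it runs in O(n) time with O(n)
# recursion depth in the worst case.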
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
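

if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): with the
    # defaults above, the conv strides multiply to 5*2*2*2*2*2*2 = 320, i.e.
    # one logit frame per 320 waveform samples (~20 ms of 16 kHz audio).
    config = WavLMConfig()
    assert config.inputs_to_logits_ratio == 320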
| 96 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 96 | 1 |
'''simple docstring'''
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """In-place DP: each cell accumulates the cheapest cost of reaching it
    moving only right or down; the answer ends up in the bottom-right cell."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
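
    # Worked example (illustrative grid, not from the original module):
    # [[1, 3, 1],          DP table:  [[1, 4, 5],
    #  [1, 5, 1],    ->                [2, 7, 6],
    #  [4, 2, 1]]                      [6, 8, 7]]
    # so the cheapest right/down path costs 7 (1 -> 3 -> 1 -> 1 -> 1).
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7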
| 271 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
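
# Illustration of the split above (hypothetical toy sizes, not part of the
# original conversion script): a fused qkv weight of shape (3*dim, dim) is cut
# into equal query/key/value thirds along the first axis.
#
#     dim = 4
#     qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#     assert q.shape == k.shape == v.shape == (dim, dim)
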
def read_in_decoder_q_k_v(state_dict, config):
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 271 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the image processor will produce: the
        shortest edge is scaled to size["shortest_edge"]; batches are padded
        up to the largest height and width in the batch."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
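
    # Worked example of the resize arithmetic above (illustrative numbers only):
    # a 30 (w) x 400 (h) input with size["shortest_edge"] = 18 maps to
    # width 18, height int(18 * 400 / 30) = 240, preserving the aspect ratio;
    # in a batch, every image is then padded up to the batch-wide max H and W.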
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 67 |
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """update(1, 1, size, a, b, v) assigns v to every element in [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """query(1, 1, size, a, b) returns the max of the elements in [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
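
# Sketch of the idea behind `_LazyModule` (illustrative only, not the real
# implementation): attribute access imports the owning submodule on demand.
#
#     import importlib, types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._map = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._map[attr], self.__name__)
#             return getattr(module, attr)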
| 680 |
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
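
    # Quick usage sketch (illustrative values): a unit edge gives
    # surface area 3*sqrt(25 + 10*sqrt(5)) and volume (15 + 7*sqrt(5))/4.
    print(dodecahedron_surface_area(1))  # ~20.6457
    print(dodecahedron_volume(1))        # ~7.6631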
| 680 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 107 |
'''simple docstring'''
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 107 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
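

# Usage sketch (illustrative; `Dataset.from_generator` is the public entry
# point that wraps this reader):
#
#     from datasets import Dataset
#
#     def gen():
#         for i in range(3):
#             yield {"id": i, "text": f"example {i}"}
#
#     ds = Dataset.from_generator(gen)
#     print(ds[0])  # {'id': 0, 'text': 'example 0'}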
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 671 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Tuple ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
UpperCAmelCase_ =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ ="cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCAmelCase_ =DDPMScheduler()
UpperCAmelCase_ =AudioDiffusionPipeline(vqvae=_lowerCAmelCase , unet=self.dummy_unet , mel=_lowerCAmelCase , scheduler=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ =pipe(generator=_lowerCAmelCase , steps=4 )
UpperCAmelCase_ =output.audios[0]
UpperCAmelCase_ =output.images[0]
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ =pipe(generator=_lowerCAmelCase , steps=4 , return_dict=_lowerCAmelCase )
UpperCAmelCase_ =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCAmelCase_ =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCAmelCase_ =DDIMScheduler()
UpperCAmelCase_ =self.dummy_vqvae_and_unet
UpperCAmelCase_ =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_lowerCAmelCase , scheduler=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
np.random.seed(0 )
UpperCAmelCase_ =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ =pipe(raw_audio=_lowerCAmelCase , generator=_lowerCAmelCase , start_step=5 , steps=10 )
UpperCAmelCase_ =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCAmelCase_ =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ =self.dummy_unet_condition
UpperCAmelCase_ =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_lowerCAmelCase , mel=_lowerCAmelCase , scheduler=_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
np.random.seed(0 )
UpperCAmelCase_ =torch.rand((1, 1, 10) )
UpperCAmelCase_ =pipe(generator=_lowerCAmelCase , encoding=_lowerCAmelCase )
UpperCAmelCase_ =output.images[0]
UpperCAmelCase_ =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ =np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
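
# Illustrative sketch (not part of the test suite): the same pipeline API the tests
# above exercise, reduced to a minimal standalone call. The checkpoint id comes from
# the integration test; running on CUDA is an assumption.
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to("cuda")
#   generator = torch.Generator(device="cuda").manual_seed(42)
#   output = pipe(generator=generator)
#   audio, image = output.audios[0], output.images[0]  # waveform plus its mel-spectrogram image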
| 54 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
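
# Illustrative sketch (not in the original file): how encode_line and trim_batch
# compose. encode_line pads a single line out to max_length; trim_batch then drops
# the columns of a stacked batch that are padding in every row. The checkpoint id
# below is only an assumption for the demo.
#
#   tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#   enc = encode_line(tokenizer, "a short line", max_length=32, padding_side="right")
#   ids = trim_batch(enc["input_ids"], tokenizer.pad_token_id)  # all-pad columns removed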
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
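
# Worked example (illustrative): normalize_answer lowercases, strips punctuation and
# articles, and collapses whitespace, so "The cat sat." and "cat sat" normalize to the
# same string and exact_match_score returns True. For prediction "the cat" vs ground
# truth "cat sat": one shared token, precision = 1/1, recall = 1/2, so
# F1 = 2 * 1.0 * 0.5 / 1.5 = 0.666...
#
#   assert exact_match_score("The cat sat.", "cat sat")
#   assert abs(f1_score("the cat", "cat sat") - 2 / 3) < 1e-9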
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 224 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write first n lines of each file f in src_dir to dest_dir/f"""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
| 78 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 78 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
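
# Worked example (illustrative), following PATTERNS and the encoder branch above:
#   "encoder.layers.0.attention.q_lin.weight"
#     -> "encoder.layers.0.attn.q_proj.weight"        (PATTERNS substitutions)
#     -> "encoder.layers.0.self_attn.q_proj.weight"   (".attn" -> ".self_attn")
#
#   assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == \
#       "encoder.layers.0.self_attn.q_proj.weight"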
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the transformers structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
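
# Illustrative invocation (the script file name is a placeholder; the flag names and
# defaults come from the argparse setup below):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json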
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
_lowerCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 6 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
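
# Illustrative invocation via python-fire (the script file name is a placeholder):
# positional arguments map onto convert()'s parameters, so this halves every tensor
# in pytorch_model.bin and overwrites it in place:
#   python convert_model_to_fp16.py pytorch_model.bin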
if __name__ == "__main__":
fire.Fire(convert) | 46 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
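
# Illustrative note: for decoder_input_ids [[0, 5, 7, 1]] with pad_token_id=1 the mask
# built above is [[1, 1, 1, 0]] -- the first position is always kept (it is the
# decoder start token) and every later pad position is masked out.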
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 713 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
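
# Worked example (illustrative) of the mapping implemented above:
#   rename_key("patch_embed.0.weight", num_meta4D_last_stage)
#       -> "patch_embed.convolution1.weight"                  (layer "0" branch)
#       -> "efficientformer.patch_embed.convolution1.weight"  (final prefixing)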
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
_UpperCamelCase : Tuple =parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 575 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
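
# Illustrative note: with the lazy module in place, importing this package stays cheap;
# the tokenization submodule is only materialized on first attribute access, e.g.
#   from transformers.models.deprecated.tapex import TapexTokenizer
# (the exact package path is an assumption -- it depends on where this __init__ lives).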
| 187 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_proc_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 187 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
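
# Why equality is expected (illustrative): both schedulers share the same forward
# (noising) process,
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
# so with identical betas, seeds, and batches, add_noise -- and therefore every
# training step -- produces identical tensors; DDPM and DDIM differ only at
# sampling time.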
| 652 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 652 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
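
# Minimal sketch (illustrative; GreetCommand is hypothetical, not a real transformers
# command): a concrete subclass implements register_subcommand to attach its
# sub-parser and run() to do the actual work.


class GreetCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the sub-parsers action of the root CLI parser
        greet_parser = parser.add_parser("greet", help="Print a greeting.")
        greet_parser.add_argument("--name", type=str, default="world")
        greet_parser.set_defaults(func=lambda args: GreetCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")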
| 22 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
'''simple docstring'''
_a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )]
return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Normalize and tokenize a sentence, following sacrebleu's conventions."""
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # Transpose references from [prediction][ref] to [ref][prediction], the layout sacrebleu expects.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
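
# A quick end-to-end sanity check with the helpers above (names follow the
# deobfuscated sketch; the expected values are the ones quoted in
# _KWARGS_DESCRIPTION, not independently re-derived here):
#
#     sources = ["About 95 species are currently accepted ."]
#     predictions = ["About 95 you now get in ."]
#     references = [["About 95 species are currently known ."]]
#     print(compute_sari(sources, predictions, references))  # ~21.8
#     print(compute_em(predictions, references))             # 0.0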
| 22 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # RoFormer ties the embedding size to the hidden size unless one is given explicitly.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        ) | 720 |
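# A minimal sketch of instantiating the config above (argument names follow the
# deobfuscated signature; the defaults shown match the original values):
#
#     config = RoFormerConfig(vocab_size=50_000, rotary_value=True)
#     print(config.embedding_size)  # falls back to hidden_size (768)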
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    """Thin wrapper holding the question-encoder and generator tokenizers of a RAG model."""

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # Imported here to avoid a circular import at module load time.
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs | 692 | 0 |
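# A usage sketch for the composite tokenizer above (the checkpoint name is the
# usual RAG checkpoint; treat the call pattern as illustrative, not guaranteed):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#     batch = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     text = tokenizer.batch_decode(batch["input_ids"])  # decodes with the generator tokenizer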
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/krishnap25/mauve',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/krishnap25/mauve'],
            reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ],
        )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets='auto', pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name='gpt2-large', device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed)
        return out
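
# Note: _compute returns the full object produced by compute_mauve, so callers
# read attributes rather than dict keys — e.g. out.mauve and out.frontier_integral,
# consistent with the doctest in _KWARGS_DESCRIPTION above.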
| 107 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with zeros if it is shorter than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Leave the sequence untouched if it matches the block size exactly."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Stories without `@highlight` sections yield an empty summary list."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lists."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 684 | 0 |
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation, three bits at a time."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros until the length is a multiple of three.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
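
# Expected behaviour of the converter above (doctest-style illustration; testmod
# below only picks these up if they are moved into the function's docstring):
#
#     >>> bin_to_octal("111")
#     '7'
#     >>> bin_to_octal("1111")
#     '17'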
if __name__ == "__main__":
from doctest import testmod
testmod()
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
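
# Design note on the pattern above: registering a _LazyModule in sys.modules
# defers the heavy torch/TF/Flax imports until an attribute is first accessed,
# so importing the package stays cheap; the TYPE_CHECKING branch keeps the
# eager imports visible to static analyzers without paying for them at runtime.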
| 364 | 0 |
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: return the longest palindromic substring of
    input_string in O(n) time (versus O(n^2) for the naive
    expand-around-center approach).
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_input_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this substring
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
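
# Expected behaviour of palindromic_string above (doctest-style illustration):
#
#     >>> palindromic_string("abbbaba")
#     'abbba'
#     >>> palindromic_string("ababa")
#     'ababa'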
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
'''simple docstring'''
__snake_case: Tuple = "Tobias Carryer"
from time import time
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=int(time() ) ): # noqa: B008
'''simple docstring'''
a_ : Optional[int] = multiplier
a_ : Any = increment
a_ : Dict = modulo
a_ : List[str] = seed
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
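
# The constants used below — multiplier 1664525, increment 1013904223,
# modulus 2 << 31 (= 2**32) — are the well-known LCG parameters from
# "Numerical Recipes", which give a full-period generator for this modulus.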
if __name__ == "__main__":
# Show the LCG in action.
__snake_case: Any = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 577 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build model inputs by adding <s>...</s> (and </s></s> between sequence pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BARThez does not use token type ids, so return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
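
# A short usage sketch for the fast tokenizer above (checkpoint names come from
# PRETRAINED_VOCAB_FILES_MAP; illustrative, not a guaranteed download):
#
#     tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Bonjour le monde")["input_ids"]
#     print(tokenizer.decode(ids))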
| 499 |
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of nums (0 for an empty list)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either extend a sum that excluded the previous element, or skip num.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
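
# Expected behaviour of maximum_non_adjacent_sum above (doctest-style):
#
#     >>> maximum_non_adjacent_sum([1, 2, 3])
#     4
#     >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
#     18
#     >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
#     0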
if __name__ == "__main__":
import doctest
doctest.testmod()
| 499 | 1 |